gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 24 20:46:25 2017
@author: mtkes
"""
# import the necessary packages
#from __future__ import print_function
import cv2
import time
# import the necessary packages
from threading import Lock
from threading import Thread
class FrameRate:
    """Rolling frames-per-second estimator.

    Call update() once per processed frame; fps() refreshes the rate
    estimate whenever more than 10 frames have accumulated since the
    last refresh, which smooths the reported value.
    """

    def __init__(self):
        # Interval bookkeeping: wall-clock start/end plus a frame counter
        # and the most recently computed rate.
        self._start = None
        self._end = None
        self._numFrames = 0
        self._rate = 0.0

    def start(self):
        """Begin a new measurement interval; returns self for chaining."""
        self._numFrames = 0
        self._start = time.time()
        return self

    def reset(self):
        """Alias for start(): drop accumulated frames, restart the clock."""
        self.start()

    def stop(self):
        """Record the end-of-interval timestamp."""
        self._end = time.time()

    def update(self):
        """Count one more frame in the current interval."""
        self._numFrames += 1

    def elapsed(self):
        """Seconds elapsed since the interval began."""
        return time.time() - self._start

    def fps(self):
        """Return the latest rate estimate in frames per second.

        The estimate is recomputed (and the interval restarted) only once
        more than 10 frames have been observed since the last refresh.
        """
        if self._numFrames > 10:
            self._rate = self._numFrames / self.elapsed()
            self.reset()
        return self._rate
class BucketCapture:
    """Threaded cv2.VideoCapture reader.

    A daemon thread continuously grabs frames; read() hands back the most
    recent frame under a lock, so the grab loop blocks callers only for
    the duration of the buffer copy.
    """

    def __init__(self, src=0):
        self._lock = Lock()
        self.fps = FrameRate()
        # Open the stream and prime the buffers with the first frame.
        self.stream = cv2.VideoCapture(src)
        (self._grabbed, self._frame) = self.stream.read()
        if self._grabbed:
            self.grabbed = self._grabbed
            self.frame = self._frame
            self.outFrame = self.frame
            self.count = 1
            self.outCount = self.count
        else:
            self.grabbed = False
            self.frame = None
            self.outFrame = None
            self.count = 0
            self.outCount = self.count
        # _stop requests thread shutdown; stopped reports that it happened.
        self._stop = False
        self.stopped = True

    def start(self):
        """Launch the background grab loop; returns self for chaining."""
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Grab loop: runs until stop() is called."""
        self.stopped = False
        self.fps.start()
        while True:
            # BUGFIX: the original tested `self.stop == True`, but `stop`
            # is a bound method, so the condition was never true and the
            # thread could never be stopped (stop() sets `_stop`, which
            # nothing read). Test and clear the _stop flag instead.
            if self._stop:
                self._stop = False
                self.stopped = True
                return

            # otherwise, read the next frame from the stream
            (self._grabbed, self._frame) = self.stream.read()
            self.fps.update()

            # Lock only around the buffer swap so read() blocks briefly.
            # Later we may consider a queue or double buffer to minimize
            # blocking further.
            if self._grabbed:
                self._lock.acquire(blocking=True)
                self.count = self.count + 1
                self.grabbed = self._grabbed
                self.frame = self._frame
                self._lock.release()

    def read(self):
        """Return (frame, count, ok) for the most recent grabbed frame."""
        # NOTE(review): acquire(blocking=True) always returns True, so the
        # else branch is dead and ok is always True; the original comment
        # suggests blocking=False was intended ("if the frame is not being
        # updated at this exact moment") — confirm before changing, since
        # callers treat ok as "frame available".
        if self._lock.acquire(blocking=True):
            self.outFrame = self.frame
            self.outCount = self.count
            self._lock.release()
            return (self.outFrame, self.outCount, True)
        else:
            return (self.outFrame, self.outCount, False)

    def stop(self):
        """Ask the grab loop to exit; `stopped` becomes True once it has."""
        self._stop = True
class BlueBoiler:
    """
    An OpenCV pipeline generated by GRIP.

    process() resizes the frame, thresholds it in RGB space, finds the
    external contours of the mask, and filters them by geometry.
    """

    def __init__(self):
        """initializes all values to presets or None if need to be set
        """
        self.__resize_image_width = 320.0
        self.__resize_image_height = 240.0
        self.__resize_image_interpolation = cv2.INTER_CUBIC
        self.resize_image_output = None

        self.__rgb_threshold_input = self.resize_image_output
        # Per-channel [min, max] thresholds produced by GRIP tuning.
        self.__rgb_threshold_red = [0.0, 39.59897610921503]
        self.__rgb_threshold_green = [82.55395683453237, 223.39410813723725]
        self.__rgb_threshold_blue = [162.81474820143887, 255.0]
        self.rgb_threshold_output = None

        self.__find_contours_input = self.rgb_threshold_output
        self.__find_contours_external_only = True
        self.find_contours_output = None

        self.__filter_contours_contours = self.find_contours_output
        self.__filter_contours_min_area = 20.0
        self.__filter_contours_min_perimeter = 0.0
        self.__filter_contours_min_width = 0.0
        self.__filter_contours_max_width = 1000.0
        self.__filter_contours_min_height = 0.0
        self.__filter_contours_max_height = 1000.0
        self.__filter_contours_solidity = [0, 100]
        self.__filter_contours_max_vertices = 1000000.0
        self.__filter_contours_min_vertices = 0.0
        self.__filter_contours_min_ratio = 0.0
        self.__filter_contours_max_ratio = 1000.0
        self.filter_contours_output = None

    def process(self, source0):
        """
        Runs the pipeline and sets all outputs to new values.

        Returns (find_contours_output, filter_contours_output).
        """
        # Step Resize_Image0:
        self.__resize_image_input = source0
        (self.resize_image_output) = self.__resize_image(self.__resize_image_input, self.__resize_image_width, self.__resize_image_height, self.__resize_image_interpolation)

        # Step RGB_Threshold0:
        self.__rgb_threshold_input = self.resize_image_output
        (self.rgb_threshold_output) = self.__rgb_threshold(self.__rgb_threshold_input, self.__rgb_threshold_red, self.__rgb_threshold_green, self.__rgb_threshold_blue)

        # Step Find_Contours0:
        self.__find_contours_input = self.rgb_threshold_output
        (self.find_contours_output) = self.__find_contours(self.__find_contours_input, self.__find_contours_external_only)

        # Step Filter_Contours0:
        self.__filter_contours_contours = self.find_contours_output
        (self.filter_contours_output) = self.__filter_contours(self.__filter_contours_contours, self.__filter_contours_min_area, self.__filter_contours_min_perimeter, self.__filter_contours_min_width, self.__filter_contours_max_width, self.__filter_contours_min_height, self.__filter_contours_max_height, self.__filter_contours_solidity, self.__filter_contours_max_vertices, self.__filter_contours_min_vertices, self.__filter_contours_min_ratio, self.__filter_contours_max_ratio)

        return (self.find_contours_output, self.filter_contours_output)

    @staticmethod
    def __resize_image(input, width, height, interpolation):
        """Scales and image to an exact size.
        Args:
            input: A numpy.ndarray.
            width: The desired width in pixels.
            height: The desired height in pixels.
            interpolation: Opencv enum for the type of interpolation.
        Returns:
            A numpy.ndarray of the new size.
        """
        return cv2.resize(input, ((int)(width), (int)(height)), 0, 0, interpolation)

    @staticmethod
    def __rgb_threshold(input, red, green, blue):
        """Segment an image based on color ranges.
        Args:
            input: A BGR numpy.ndarray.
            red: A list of two numbers that are the min and max red.
            green: A list of two numbers that are the min and max green.
            blue: A list of two numbers that are the min and max blue.
        Returns:
            A black and white numpy.ndarray.
        """
        out = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
        return cv2.inRange(out, (red[0], green[0], blue[0]), (red[1], green[1], blue[1]))

    @staticmethod
    def __find_contours(input, external_only):
        """Finds the contours of a binary image.
        Args:
            input: A numpy.ndarray (binary image).
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if (external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        # BUGFIX: cv2.findContours returns (image, contours, hierarchy)
        # in OpenCV 3 but (contours, hierarchy) in OpenCV 4; the original
        # 3-way unpack raised ValueError on OpenCV 4. Select the contours
        # element in a version-agnostic way.
        result = cv2.findContours(input, mode=mode, method=method)
        contours = result[0] if len(result) == 2 else result[1]
        return contours

    @staticmethod
    def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,
                          min_height, max_height, solidity, max_vertex_count, min_vertex_count,
                          min_ratio, max_ratio):
        """Filters out contours that do not meet certain criteria.
        Args:
            input_contours: Contours as a list of numpy.ndarray.
            min_area: The minimum area of a contour that will be kept.
            min_perimeter: The minimum perimeter of a contour that will be kept.
            min_width: Minimum width of a contour.
            max_width: Maximum width of a contour.
            min_height: Minimum height.
            max_height: Maximum height.
            solidity: The minimum and maximum solidity of a contour.
            min_vertex_count: Minimum vertex count of the contours.
            max_vertex_count: Maximum vertex count.
            min_ratio: Minimum ratio of width to height.
            max_ratio: Maximum ratio of width to height.
        Returns:
            Contours as a list of numpy.ndarray.
        """
        output = []
        for contour in input_contours:
            x, y, w, h = cv2.boundingRect(contour)
            if (w < min_width or w > max_width):
                continue
            if (h < min_height or h > max_height):
                continue
            area = cv2.contourArea(contour)
            if (area < min_area):
                continue
            if (cv2.arcLength(contour, True) < min_perimeter):
                continue
            hull = cv2.convexHull(contour)
            hull_area = cv2.contourArea(hull)
            # BUGFIX: a degenerate (zero-area) hull made the original
            # divide by zero; treat it as failing the solidity filter.
            if (hull_area == 0):
                continue
            solid = 100 * area / hull_area
            if (solid < solidity[0] or solid > solidity[1]):
                continue
            if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
                continue
            ratio = (float)(w) / h
            if (ratio < min_ratio or ratio > max_ratio):
                continue
            output.append(contour)
        return output
b = BlueBoiler()

# created a *threaded* video stream, allow the camera sensor to warmup,
# and start the FPS counter
print("[INFO] sampling THREADED frames from webcam...")

cam = 1
width = 320
height = 240
exposure = 100.0

bucketCapture = BucketCapture(src=cam).start()
bucketCapture.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
bucketCapture.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
bucketCapture.stream.set(cv2.CAP_PROP_EXPOSURE, exposure)

fps = FrameRate()
fps.start()

# Loop over frames from the threaded stream for ~30 seconds.
# BUGFIX: time.clock() was deprecated in 3.3 and removed in Python 3.8;
# time.perf_counter() is the documented replacement for interval timing.
startTime = time.perf_counter()
while (time.perf_counter() - startTime < 30.0):
    # grab the most recent frame from the threaded video stream
    (frame, count, isNew) = bucketCapture.read()
    # Only run the pipeline/display when a frame actually exists —
    # read() can hand back None if the camera never produced a frame.
    if (isNew and frame is not None):
        (f, g) = b.process(frame)
        cv2.putText(frame,
                    "{:.1f}".format(bucketCapture.fps.fps()) + " : {:.1f}".format(fps.fps()),
                    (0, 240), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255))
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # update the FPS counter
        fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
bucketCapture.stop()
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: add the integer column ``xframe_options`` to ``cms_page``."""
    # Adding field 'Page.xframe_options'
    # default=0 backfills existing rows; keep_default=False follows the
    # usual South convention of not retaining a DB-level default.
    db.add_column(u'cms_page', 'xframe_options',
                  self.gf('django.db.models.fields.IntegerField')(default=0),
                  keep_default=False)
def backwards(self, orm):
    """Reverse the migration: drop the ``xframe_options`` column from ``cms_page``."""
    # Deleting field 'Page.xframe_options'
    db.delete_column(u'cms_page', 'xframe_options')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', 'db_table': "u'cmsplugin_placeholderreference'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| |
from django.db.models import Q
# FIXME(willkg): This ignores the variety of other whitespace characters in unicode.
WHITESPACE = u' \t\r\n'


def to_tokens(text):
    """Breaks the search text into tokens.

    Splits on whitespace outside of double quotes. A backslash marks the
    following character as escaped (the backslash itself is kept in the
    token; unescape() strips it later). A dangling open quote is closed
    off so downstream parsing sees balanced quotes.
    """
    inside_quote = False
    escaped = False
    tokens = []
    current = []

    for ch in text:
        if ch == u'\\':
            escaped = True
            current.append(ch)
            continue

        if inside_quote:
            if not escaped and ch == u'"':
                inside_quote = False
            current.append(ch)
        elif not escaped and ch == u'"':
            inside_quote = True
            current.append(ch)
        elif ch in WHITESPACE:
            if current:
                tokens.append(u''.join(current))
                current = []
        else:
            current.append(ch)
        escaped = False

    if inside_quote:
        # Finish off a missing closing quote.
        if current:
            current.append(u'"')
        else:
            tokens[-1] = tokens[-1] + u'"'

    if current:
        # Flush the final token.
        tokens.append(u''.join(current))

    return tokens
class ParseError(Exception):
    """Raised when the search text cannot be parsed by the query grammar."""
    pass
def unescape(text):
    """Unescapes text

    >>> unescape(u'abc')
    u'abc'
    >>> unescape(u'\\abc')
    u'abc'
    >>> unescape(u'\\\\abc')
    u'\\abc'
    """
    # Note: We can ditch this and do it in tokenizing if tokenizing
    # returned typed tokens rather than a list of strings.
    out = []
    chars = iter(text)
    for ch in chars:
        if ch == u'\\':
            # Drop the backslash and emit the escaped character verbatim;
            # a trailing lone backslash is simply dropped.
            nxt = next(chars, None)
            if nxt is not None:
                out.append(nxt)
        else:
            out.append(ch)
    return u''.join(out)
def build_match(field, token):
    """Build a case-insensitive containment Q clause for *token* on *field*."""
    lookup = '%s__icontains' % field
    return Q(**{lookup: unescape(token)})
def build_match_phrase(field, token):
    """Build a phrase-match Q clause for *token* on *field*.

    NOTE(review): currently identical to build_match() — icontains has no
    separate phrase semantics here; confirm this is intentional.
    """
    lookup = '%s__icontains' % field
    return Q(**{lookup: unescape(token)})
def build_or(clauses):
    """Fold a non-empty list of clauses into one expression with `|`."""
    combined = clauses[0]
    for clause in clauses[1:]:
        combined = combined | clause
    return combined
def build_and(clauses):
    """Fold a non-empty list of clauses into one expression with `&`."""
    combined = clauses[0]
    for clause in clauses[1:]:
        combined = combined & clause
    return combined
def parse_match(field, tokens):
    """Parses a match or match_phrase node.

    :arg field: the field we're querying on
    :arg tokens: list of tokens to consume (reversed: consumed from the end)

    :returns: list of match clauses
    """
    clauses = []
    # Slurp tokens until the next boolean operator (or until exhausted).
    while tokens and tokens[-1] not in (u'OR', u'AND'):
        tok = tokens.pop()
        if tok.startswith(u'"'):
            # Quoted token: strip the surrounding quotes, build a phrase match.
            clauses.append(build_match_phrase(field, tok[1:-1]))
        else:
            clauses.append(build_match(field, tok))
    return clauses
def parse_oper(field, lhs, tokens):
    """Parses a single bool query.

    :arg field: the field we're querying on
    :arg lhs: the clauses on the left hand side
    :arg tokens: list of tokens to consume

    :returns: bool query
    """
    oper = tokens.pop()
    rhs = parse_query(field, tokens)
    if oper == u'OR':
        lhs.extend(rhs)
        return build_or(lhs)
    if oper == u'AND':
        lhs.extend(rhs)
        return build_and(lhs)
    # Note: probably unreachable given the way parse_match slurps tokens;
    # kept as a guard in case the tokenizer/parser changes.
    raise ParseError('Not an oper token: {0}'.format(oper))
def parse_query(field, tokens):
    """Parses a match or query.

    :arg field: the field we're querying on
    :arg tokens: list of tokens to consume

    :returns: list of clauses
    """
    clauses = parse_match(field, tokens)
    if not tokens:
        return clauses
    # Remaining tokens start with an operator: fold into one bool query.
    return [parse_oper(field, clauses, tokens)]
def generate_query(field, text):
    """Parses the search text and returns a Q.

    :arg field: the field to search
    :arg text: the user's search text

    :return: Q

    Recursive descent parser over this grammar::

        query = match | match oper
        oper  = "AND" query | "OR" query
        match = token ... | '"' token '"'

    On a parse error it attempts to recover; if it can't, it falls back
    to a single match query over the whole text.
    """
    # Tokens are consumed from the end, so reverse once up front.
    tokens = to_tokens(text)
    tokens.reverse()
    try:
        clauses = parse_query(field, tokens)
    except ParseError:
        # Unparseable: degrade gracefully to one big match on the raw text.
        return build_match(field, text)
    return build_or(clauses) if len(clauses) > 1 else clauses[0]
| |
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from .attachment import Attachment
from .cell import Cell
from .column import Column
from .discussion import Discussion
from .enums import AccessLevel
from ..types import *
from .user import User
from ..util import serialize
from ..util import deserialize
from datetime import datetime
class Row(object):
"""Smartsheet Row data model."""
def __init__(self, props=None, base_obj=None):
    """Initialize the Row model.

    :param props: optional mapping of raw property values; handed to
        deserialize() to populate the typed fields below.
    :param base_obj: optional base/client object retained on self._base.
    """
    self._base = None
    if base_obj is not None:
        self._base = base_obj

    # Typed wrappers backing the public properties: scalar wrappers expose
    # .value, TypedList fields load via .load(), enums set via .set().
    self._above = Boolean()
    self._access_level = EnumeratedValue(AccessLevel)
    self._attachments = TypedList(Attachment)
    self._cells = TypedList(Cell)
    self._columns = TypedList(Column)
    self._conditional_format = String()
    self._created_at = Timestamp()
    self._created_by = TypedObject(User)
    self._discussions = TypedList(Discussion)
    self._expanded = Boolean()
    self._filtered_out = Boolean()
    # Trailing underscores avoid shadowing the builtins `format` and `id`;
    # __getattr__/__setattr__ alias the plain names onto these.
    self._format_ = String()
    self._id_ = Number()
    self._in_critical_path = Boolean()
    self._indent = Number()
    self._locked = Boolean()
    self._locked_for_user = Boolean()
    self._modified_at = Timestamp()
    self._modified_by = TypedObject(User)
    self._outdent = Number()
    self._parent_id = Number()
    self._permalink = String()
    self._row_number = Number()
    self._sheet_id = Number()
    self._sibling_id = Number()
    self._to_bottom = Boolean()
    self._to_top = Boolean()
    self._version = Number()

    if props:
        deserialize(self, props)

    # requests package Response object
    self.request_response = None
    self.__initialized = True
def __getattr__(self, key):
    """Alias the builtin-shadowing names `format` and `id` onto their
    trailing-underscore properties; any other miss is a real miss."""
    if key == 'format':
        return self.format_
    if key == 'id':
        return self.id_
    raise AttributeError(key)
def __setattr__(self, key, value):
    """Route writes to `format`/`id` through their aliased properties.

    Those names shadow Python builtins, so the real storage uses the
    trailing-underscore attributes; everything else is set normally.
    """
    if key == 'format':
        self.format_ = value
    elif key == 'id':
        self.id_ = value
    else:
        super(Row, self).__setattr__(key, value)
# Typed accessor properties: each property proxies the underscored
# wrapper created in __init__ (.value for scalar wrappers, .load() for
# TypedList fields, .set() for enumerated values).

@property
def above(self):
    return self._above.value

@above.setter
def above(self, value):
    self._above.value = value

@property
def access_level(self):
    return self._access_level

@access_level.setter
def access_level(self, value):
    self._access_level.set(value)

@property
def attachments(self):
    return self._attachments

@attachments.setter
def attachments(self, value):
    self._attachments.load(value)

@property
def cells(self):
    return self._cells

@cells.setter
def cells(self, value):
    self._cells.load(value)

@property
def columns(self):
    return self._columns

@columns.setter
def columns(self, value):
    self._columns.load(value)

@property
def conditional_format(self):
    return self._conditional_format.value

@conditional_format.setter
def conditional_format(self, value):
    self._conditional_format.value = value

@property
def created_at(self):
    return self._created_at.value

@created_at.setter
def created_at(self, value):
    self._created_at.value = value

@property
def created_by(self):
    return self._created_by.value

@created_by.setter
def created_by(self, value):
    self._created_by.value = value

@property
def discussions(self):
    return self._discussions

@discussions.setter
def discussions(self, value):
    self._discussions.load(value)

@property
def expanded(self):
    return self._expanded.value

@expanded.setter
def expanded(self, value):
    self._expanded.value = value

@property
def filtered_out(self):
    return self._filtered_out.value

@filtered_out.setter
def filtered_out(self, value):
    self._filtered_out.value = value

# `format_` / `id_` back the plain `format` / `id` names exposed through
# __getattr__/__setattr__ (which avoid shadowing the builtins).
@property
def format_(self):
    return self._format_.value

@format_.setter
def format_(self, value):
    self._format_.value = value

@property
def id_(self):
    return self._id_.value

@id_.setter
def id_(self, value):
    self._id_.value = value

@property
def in_critical_path(self):
    return self._in_critical_path.value

@in_critical_path.setter
def in_critical_path(self, value):
    self._in_critical_path.value = value

@property
def indent(self):
    return self._indent.value

@indent.setter
def indent(self, value):
    self._indent.value = value
@property
def locked(self):
return self._locked.value
@locked.setter
def locked(self, value):
self._locked.value = value
@property
def locked_for_user(self):
return self._locked_for_user.value
@locked_for_user.setter
def locked_for_user(self, value):
self._locked_for_user.value = value
@property
def modified_at(self):
return self._modified_at.value
@modified_at.setter
def modified_at(self, value):
self._modified_at.value = value
@property
def modified_by(self):
return self._modified_by.value
@modified_by.setter
def modified_by(self, value):
self._modified_by.value = value
@property
def outdent(self):
return self._outdent.value
@outdent.setter
def outdent(self, value):
self._outdent.value = value
@property
def parent_id(self):
return self._parent_id.value
@parent_id.setter
def parent_id(self, value):
self._parent_id.value = value
@property
def permalink(self):
return self._permalink.value
@permalink.setter
def permalink(self, value):
self._permalink.value = value
@property
def row_number(self):
return self._row_number.value
@row_number.setter
def row_number(self, value):
self._row_number.value = value
@property
def sheet_id(self):
return self._sheet_id.value
@sheet_id.setter
def sheet_id(self, value):
self._sheet_id.value = value
@property
def sibling_id(self):
return self._sibling_id.value
@sibling_id.setter
def sibling_id(self, value):
self._sibling_id.value = value
@property
def to_bottom(self):
return self._to_bottom.value
@to_bottom.setter
def to_bottom(self, value):
self._to_bottom.value = value
@property
def to_top(self):
return self._to_top.value
@to_top.setter
def to_top(self, value):
self._to_top.value = value
@property
def version(self):
return self._version.value
@version.setter
def version(self, value):
self._version.value = value
def get_column(self, column_id):
for cell in self.cells:
if cell.column_id == column_id:
return cell
def set_column(self, column_id, replacement_cell):
for idx, cell in enumerate(self.cells):
if cell.column_id == column_id:
self.cells[idx] = replacement_cell
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()
| |
import math, pygame
from pygame.locals import *
#############################################
## Standard colors (RGB)
# NOTE: "BLACK" is actually a dark blue-grey, easier on the eyes than
# pure black when used as the window background.
BLACK = (20, 20, 40)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
## Customize plot here
def function_to_print(x):
    """Function to be plotted.

    Maps a single number x to a single number y; currently a downward
    parabola with roots at x = 0 and x = 3.  Edit this to plot something
    else."""
    return -x * (x - 3)
# Range of window (model-space coordinates, not pixels)
X_MIN = 0.0
X_MAX = 10.0
Y_MIN = -10.0
Y_MAX = 10.0
# Tick interval on axes (model-space units)
X_TICK = 2.5
Y_TICK = 2.5
# Granularity of plotted functions, more points -> higher resolution plot
N_POINTS = 100
# Colors
background_color = BLACK
plot_color = GREEN
grid_color = WHITE
# Note, it is also possible to make a list of functions to print
# and respective colors:
# functions = [(f1, color1), (f2, color2), ...]
#############################################
## Let the program calculate the rest
# Window size in pixels
WIDTH = 640
HEIGHT = 480
# Extent of the model-space window along each axis
X_SIZE = X_MAX - X_MIN
Y_SIZE = Y_MAX - Y_MIN
def coordinate_to_position(c):
    """Map a model coordinate (pair-like [x, y]) to a pixel position.

    The vertical component is flipped because screen pixel rows grow
    downwards while model y grows upwards."""
    x, y = c[0], c[1]
    px = (x - X_MIN) * WIDTH / X_SIZE
    py = HEIGHT - (y - Y_MIN) * HEIGHT / Y_SIZE
    return px, py
def curve_coordinates(f, x0, x1, points):
    """Sample f at `points` evenly spaced x values from x0 to x1 inclusive.

    Returns a list of [x, f(x)] pairs whose length is exactly `points`.

    Bug fix: the previous implementation accumulated `x += delta` in a
    `while x <= x1` loop; floating-point drift could make the final x
    overshoot (or undershoot) x1, producing a list one element shorter or
    longer than `points`.  Computing each x as x0 + i*delta guarantees the
    promised length and an exact final endpoint count.
    """
    delta = (x1 - x0) / (points - 1)
    return [[x0 + i * delta, f(x0 + i * delta)] for i in range(points)]
def linspace(x0, x1, points):
    """Return a list of `points` numbers with constant spacing,
    starting at `x0` and ending at `x1`.

    Bug fix: under Python 3 the previous `map(...)` returned a one-shot
    iterator, not the list the docstring promises; callers that iterate
    the result twice would silently see nothing the second time.
    """
    delta = (x1 - x0) / (points - 1)
    return [x0 + delta * i for i in range(points)]
def curve_coordinates2(f, x0, x1, points):
    """(Alternative implementation):
    Same contract as curve_coordinates, built on top of linspace."""
    coords = []
    for x in linspace(x0, x1, points):
        coords.append([x, f(x)])
    return coords
def draw_ticks(screen, axis):
    """Draw tick marks on the specified axis.

    axis == 0 -> X-axis, otherwise Y-axis.
    Fix: the original shadowed the builtins `min` and `max` with local
    variables; they are renamed so the real builtins can be used for
    clamping.  See draw_x_ticks/draw_y_ticks for a more readable
    per-axis alternative.
    """
    if axis == 0:
        axis_min, axis_max, tick, limit = X_MIN, X_MAX, X_TICK, HEIGHT
    else:
        axis = 1  # normalize so `axis` can be used as a coordinate index
        axis_min, axis_max, tick, limit = Y_MIN, Y_MAX, Y_TICK, WIDTH
    # First and last tick positions that fall inside the window range.
    start = axis_min + axis_min % tick
    end = axis_max - axis_max % tick
    points = (end - start) / tick + 1
    # Half length of each tick line, in pixels.
    half = limit / 120
    for value in linspace(start, end, int(points)):
        c = [0, 0]
        c[axis] = value
        pos = coordinate_to_position(c)
        # Clamp both tick endpoints to the window.
        a = min(pos[1 - axis] + half, limit)
        b = max(pos[1 - axis] - half, 0)
        s = list(pos)
        s[1 - axis] = a
        e = list(pos)
        e[1 - axis] = b
        pygame.draw.line(screen, grid_color, s, e, 2)
def draw_x_ticks(screen):
    """(Alternative implementation):
    Draws appropriate tick marks on the X-axis only."""
    first = X_MIN + X_MIN % X_TICK
    last = X_MAX - X_MAX % X_TICK
    count = (last - first) / X_TICK + 1
    # Half length of each (vertical) tick line, in pixels.
    half = HEIGHT / 120
    for x in linspace(first, last, int(count)):
        pos = coordinate_to_position([x, 0])
        # Clamp the tick endpoints to the window height.
        lo = pos[1] + half
        hi = pos[1] - half
        if lo > HEIGHT:
            lo = HEIGHT
        if hi < 0:
            hi = 0
        pygame.draw.line(screen, grid_color, [pos[0], lo], [pos[0], hi], 2)
def draw_y_ticks(screen):
    """(Alternative implementation):
    Draws appropriate tick marks on the Y-axis only.
    Mirror image of draw_x_ticks(...)."""
    first = Y_MIN + Y_MIN % Y_TICK
    last = Y_MAX - Y_MAX % Y_TICK
    count = (last - first) / Y_TICK + 1
    # Half length of each (horizontal) tick line, in pixels.
    half = WIDTH / 120
    for y in linspace(first, last, int(count)):
        pos = coordinate_to_position([0, y])
        # Clamp the tick endpoints to the window width.
        right = pos[0] + half
        left = pos[0] - half
        if right > WIDTH:
            right = WIDTH
        if left < 0:
            left = 0
        pygame.draw.line(screen, grid_color, [right, pos[1]], [left, pos[1]], 2)
def draw(screen, pp, plot_color):
    """Render the pixel-position list `pp` as one connected curve,
    3 pixels wide, in `plot_color`."""
    pygame.draw.lines(screen, plot_color, False, pp, 3)
def draw_axis(screen):
    """Draw the coordinate system: tick marks first, then both axes."""
    # Alternative implementations:
    # draw_x_ticks(screen)
    # draw_y_ticks(screen)
    draw_ticks(screen, 0)
    draw_ticks(screen, 1)
    x_axis = [coordinate_to_position(c) for c in ([X_MIN, 0], [X_MAX, 0])]
    y_axis = [coordinate_to_position(c) for c in ([0, Y_MIN], [0, Y_MAX])]
    # X-Axis
    pygame.draw.lines(screen, grid_color, False, x_axis, 2)
    # Y-Axis
    pygame.draw.lines(screen, grid_color, False, y_axis, 2)
def main():
    """Open the window, draw the plot once, then wait for EXIT or ESCAPE.

    Fix: the return value of clock.tick() was bound to an unused local
    named `time` (shadowing the common module name); the binding is
    dropped.
    """
    pygame.init()
    screen = pygame.display.set_mode([WIDTH, HEIGHT])
    pygame.display.set_caption('Plot 2d')
    clock = pygame.time.Clock()
    screen.fill(background_color)

    cc = curve_coordinates(function_to_print, X_MIN, X_MAX, N_POINTS)
    pp = list(map(coordinate_to_position, cc))

    # This would typically be done inside the loop, but since the scene
    # is never updated: might as well keep it outside.
    draw(screen, pp, plot_color)
    draw_axis(screen)

    done = False
    while not done:
        clock.tick(60)  # cap the loop at 60 FPS; elapsed ms not needed
        pygame.display.update()
        for e in pygame.event.get():
            if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
                done = True
                break
    pygame.quit()
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| |
"""distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
__revision__ = "$Id: build_ext.py 65670 2008-08-14 07:35:13Z hirokazu.yamamoto $"
import sys, os, re
from site import USER_BASE, USER_SITE
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
if os.name == 'nt':
    from distutils.msvccompiler import get_build_version
    # Major version of the MSVC compiler that built this interpreter;
    # used by finalize_options() to pick matching .lib directories.
    # Only defined on Windows -- all uses are guarded by os.name checks.
    MSVC_VERSION = int(get_build_version())

# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
    (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers():
    """Print the list of available compiler types (--help-compiler)."""
    # Import locally and alias to avoid clashing with this function's name.
    from distutils.ccompiler import show_compilers as _show_compilers
    _show_compilers()
class build_ext(Command):
    """Distutils command that compiles/links C/C++ extension modules."""

    description = "build C/C++ extensions (compile/link to build directory)"

    # XXX thoughts on how to deal with complex command-line options like
    # these, i.e. how to make it so fancy_getopt can suck them off the
    # command line and make it look like setup.py defined the appropriate
    # lists of tuples of what-have-you.
    # - each command needs a callback to process its command-line options
    # - Command.__init__() needs access to its share of the whole
    #   command line (must ultimately come from
    #   Distribution.parse_command_line())
    # - it then calls the current command class' option-parsing
    #   callback to deal with weird options like -D, which have to
    #   parse the option text and churn out some custom data
    #   structure
    # - that data structure (in this case, a list of 2-tuples)
    #   will then be present in the command object by the time
    #   we get to finalize_options() (i.e. the constructor
    #   takes care of both command-line and client options
    #   in between initialize_options() and finalize_options())

    # Help-text suffix for options that accept multiple path entries.
    sep_by = " (separated by '%s')" % os.pathsep
    # (long-name, short-name, help-text) triples consumed by fancy_getopt.
    user_options = [
        ('build-lib=', 'b',
         "directory for compiled extension modules"),
        ('build-temp=', 't',
         "directory for temporary files (build by-products)"),
        ('plat-name=', 'p',
         "platform name to cross-compile for, if supported "
         "(default: %s)" % get_platform()),
        ('inplace', 'i',
         "ignore build-lib and put compiled extensions into the source " +
         "directory alongside your pure Python modules"),
        ('include-dirs=', 'I',
         "list of directories to search for header files" + sep_by),
        ('define=', 'D',
         "C preprocessor macros to define"),
        ('undef=', 'U',
         "C preprocessor macros to undefine"),
        ('libraries=', 'l',
         "external C libraries to link with"),
        ('library-dirs=', 'L',
         "directories to search for external C libraries" + sep_by),
        ('rpath=', 'R',
         "directories to search for shared C libraries at runtime"),
        ('link-objects=', 'O',
         "extra explicit link objects to include in the link"),
        ('debug', 'g',
         "compile/link with debugging information"),
        ('force', 'f',
         "forcibly build everything (ignore file timestamps)"),
        ('compiler=', 'c',
         "specify the compiler type"),
        ('swig-cpp', None,
         "make SWIG create C++ files (default is C)"),
        ('swig-opts=', None,
         "list of SWIG command line options"),
        ('swig=', None,
         "path to the SWIG executable"),
        ('user', None,
         "add user include, library and rpath"),
        ]

    # Options that are flags (present/absent) rather than taking a value.
    boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']

    help_options = [
        ('help-compiler', None,
         "list available compilers", show_compilers),
        ]
def initialize_options(self):
self.extensions = None
self.build_lib = None
self.plat_name = None
self.build_temp = None
self.inplace = 0
self.package = None
self.include_dirs = None
self.define = None
self.undef = None
self.libraries = None
self.library_dirs = None
self.rpath = None
self.link_objects = None
self.debug = None
self.force = None
self.compiler = None
self.swig = None
self.swig_cpp = None
self.swig_opts = None
self.user = None
    def finalize_options(self):
        """Resolve unset options from the 'build' command and compute the
        platform-specific include/library search paths.

        NOTE(review): heavily order-dependent platform logic -- documented
        in place rather than restructured.
        """
        from distutils import sysconfig

        # Inherit shared settings from the top-level 'build' command.
        self.set_undefined_options('build',
                                   ('build_lib', 'build_lib'),
                                   ('build_temp', 'build_temp'),
                                   ('compiler', 'compiler'),
                                   ('debug', 'debug'),
                                   ('force', 'force'),
                                   ('plat_name', 'plat_name'),
                                   )

        if self.package is None:
            self.package = self.distribution.ext_package

        self.extensions = self.distribution.ext_modules

        # Make sure Python's include directories (for Python.h, pyconfig.h,
        # etc.) are in the include search path.
        py_include = sysconfig.get_python_inc()
        plat_py_include = sysconfig.get_python_inc(plat_specific=1)
        if self.include_dirs is None:
            self.include_dirs = self.distribution.include_dirs or []
        if isinstance(self.include_dirs, str):
            # Option came from the command line as a single path string.
            self.include_dirs = self.include_dirs.split(os.pathsep)

        # Put the Python "system" include dir at the end, so that
        # any local include dirs take precedence.
        self.include_dirs.append(py_include)
        if plat_py_include != py_include:
            self.include_dirs.append(plat_py_include)

        if isinstance(self.libraries, str):
            self.libraries = [self.libraries]

        # Life is easier if we're not forever checking for None, so
        # simplify these options to empty lists if unset
        if self.libraries is None:
            self.libraries = []
        if self.library_dirs is None:
            self.library_dirs = []
        elif isinstance(self.library_dirs, str):
            self.library_dirs = self.library_dirs.split(os.pathsep)

        if self.rpath is None:
            self.rpath = []
        elif isinstance(self.rpath, str):
            self.rpath = self.rpath.split(os.pathsep)

        # for extensions under windows use different directories
        # for Release and Debug builds.
        # also Python's library directory must be appended to library_dirs
        if os.name == 'nt':
            # the 'libs' directory is for binary installs - we assume that
            # must be the *native* platform.  But we don't really support
            # cross-compiling via a binary install anyway, so we let it go.
            self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
            if self.debug:
                self.build_temp = os.path.join(self.build_temp, "Debug")
            else:
                self.build_temp = os.path.join(self.build_temp, "Release")

            # Append the source distribution include and library directories,
            # this allows distutils on windows to work in the source tree
            self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))

            # MSVC_VERSION is defined at module level only when
            # os.name == 'nt', so this access is safe here.
            if MSVC_VERSION == 9:
                # Use the .lib files for the correct architecture
                if self.plat_name == 'win32':
                    suffix = ''
                else:
                    # win-amd64 or win-ia64
                    suffix = self.plat_name[4:]
                new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
                if suffix:
                    new_lib = os.path.join(new_lib, suffix)
                self.library_dirs.append(new_lib)

            elif MSVC_VERSION == 8:
                self.library_dirs.append(os.path.join(sys.exec_prefix,
                                         'PC', 'VS8.0', 'win32release'))
            elif MSVC_VERSION == 7:
                self.library_dirs.append(os.path.join(sys.exec_prefix,
                                         'PC', 'VS7.1'))
            else:
                self.library_dirs.append(os.path.join(sys.exec_prefix,
                                         'PC', 'VC6'))

        # OS/2 (EMX) doesn't support Debug vs Release builds, but has the
        # import libraries in its "Config" subdirectory
        if os.name == 'os2':
            self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))

        # for extensions under Cygwin and AtheOS Python's library directory must be
        # appended to library_dirs
        if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
            if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
                # building third party extensions
                self.library_dirs.append(os.path.join(sys.prefix, "lib",
                                                      "python" + get_python_version(),
                                                      "config"))
            else:
                # building python standard extensions
                self.library_dirs.append('.')

        # for extensions under Linux with a shared Python library,
        # Python's library directory must be appended to library_dirs
        if (sys.platform.startswith('linux') or sys.platform.startswith('gnu')) \
                and sysconfig.get_config_var('Py_ENABLE_SHARED'):
            if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
                # building third party extensions
                self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
            else:
                # building python standard extensions
                self.library_dirs.append('.')

        # The argument parsing will result in self.define being a string, but
        # it has to be a list of 2-tuples.  All the preprocessor symbols
        # specified by the 'define' option will be set to '1'.  Multiple
        # symbols can be separated with commas.
        if self.define:
            defines = self.define.split(',')
            self.define = [(symbol, '1') for symbol in defines]

        # The option for macros to undefine is also a string from the
        # option parsing, but has to be a list.  Multiple symbols can also
        # be separated with commas here.
        if self.undef:
            self.undef = self.undef.split(',')

        if self.swig_opts is None:
            self.swig_opts = []
        else:
            self.swig_opts = self.swig_opts.split(' ')

        # Finally add the user include and library directories if requested
        if self.user:
            user_include = os.path.join(USER_BASE, "include")
            user_lib = os.path.join(USER_BASE, "lib")
            if os.path.isdir(user_include):
                self.include_dirs.append(user_include)
            if os.path.isdir(user_lib):
                self.library_dirs.append(user_lib)
                self.rpath.append(user_lib)
    def run(self):
        """Build all configured extensions: set up the CCompiler from the
        finalized options, push the compile/link settings into it, then
        delegate the actual work to build_extensions()."""
        from distutils.ccompiler import new_compiler

        # 'self.extensions', as supplied by setup.py, is a list of
        # Extension instances.  See the documentation for Extension (in
        # distutils.extension) for details.
        #
        # For backwards compatibility with Distutils 0.8.2 and earlier, we
        # also allow the 'extensions' list to be a list of tuples:
        #    (ext_name, build_info)
        # where build_info is a dictionary containing everything that
        # Extension instances do except the name, with a few things being
        # differently named.  We convert these 2-tuples to Extension
        # instances as needed.

        if not self.extensions:
            return

        # If we were asked to build any C/C++ libraries, make sure that the
        # directory where we put them is in the library search path for
        # linking extensions.
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.libraries.extend(build_clib.get_library_names() or [])
            self.library_dirs.append(build_clib.build_clib)

        # Setup the CCompiler object that we'll use to do all the
        # compiling and linking.  Note: self.compiler changes type here,
        # from an option string to a CCompiler instance.
        self.compiler = new_compiler(compiler=self.compiler,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        customize_compiler(self.compiler)
        # If we are cross-compiling, init the compiler now (if we are not
        # cross-compiling, init would not hurt, but people may rely on
        # late initialization of compiler even if they shouldn't...)
        if os.name == 'nt' and self.plat_name != get_platform():
            self.compiler.initialize(self.plat_name)

        # And make sure that any compile/link-related options (which might
        # come from the command-line or from the setup script) are set in
        # that CCompiler object -- that way, they automatically apply to
        # all compiling and linking done here.
        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name,value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)
        if self.libraries is not None:
            self.compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            self.compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            self.compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            self.compiler.set_link_objects(self.link_objects)

        # Now actually compile and link everything.
        self.build_extensions()
def check_extensions_list(self, extensions):
"""Ensure that the list of extensions (presumably provided as a
command option 'extensions') is valid, i.e. it is a list of
Extension objects. We also support the old-style list of 2-tuples,
where the tuples are (ext_name, build_info), which are converted to
Extension instances here.
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(extensions, list):
raise DistutilsSetupError(
"'ext_modules' option must be a list of Extension instances")
for i, ext in enumerate(extensions):
if isinstance(ext, Extension):
continue # OK! (assume type-checking done
# by Extension constructor)
(ext_name, build_info) = ext
log.warn("old-style (ext_name, build_info) tuple found in "
"ext_modules for extension '%s'"
"-- please convert to Extension instance" % ext_name)
if not isinstance(ext, tuple) and len(ext) != 2:
raise DistutilsSetupError(
"each element of 'ext_modules' option must be an "
"Extension instance or 2-tuple")
if not (isinstance(ext_name, str) and
extension_name_re.match(ext_name)):
raise DistutilsSetupError(
"first element of each tuple in 'ext_modules' "
"must be the extension name (a string)")
if not instance(build_info, DictionaryType):
raise DistutilsSetupError(
"second element of each tuple in 'ext_modules' "
"must be a dictionary (build info)")
# OK, the (ext_name, build_info) dict is type-safe: convert it
# to an Extension instance.
ext = Extension(ext_name, build_info['sources'])
# Easy stuff: one-to-one mapping from dict elements to
# instance attributes.
for key in ('include_dirs',
'library_dirs',
'libraries',
'extra_objects',
'extra_compile_args',
'extra_link_args'):
val = build_info.get(key)
if val is not None:
setattr(ext, key, val)
# Medium-easy stuff: same syntax/semantics, different names.
ext.runtime_library_dirs = build_info.get('rpath')
if 'def_file' in build_info:
log.warn("'def_file' element of build info dict "
"no longer supported")
# Non-trivial stuff: 'macros' split into 'define_macros'
# and 'undef_macros'.
macros = build_info.get('macros')
if macros:
ext.define_macros = []
ext.undef_macros = []
for macro in macros:
if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
raise DistutilsSetupError(
"'macros' element of build info dict "
"must be 1- or 2-tuple")
if len(macro) == 1:
ext.undef_macros.append(macro[0])
elif len(macro) == 2:
ext.define_macros.append(macro)
extensions[i] = ext
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
# Wouldn't it be neat if we knew the names of header files too...
for ext in self.extensions:
filenames.extend(ext.sources)
return filenames
def get_outputs(self):
# Sanity check the 'extensions' list -- can't assume this is being
# done in the same run as a 'build_extensions()' call (in fact, we
# can probably assume that it *isn't*!).
self.check_extensions_list(self.extensions)
# And build the list of output (built) filenames. Note that this
# ignores the 'inplace' flag, and assumes everything goes in the
# "build" tree.
outputs = []
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
outputs.append(os.path.join(self.build_lib,
self.get_ext_filename(fullname)))
return outputs
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
for ext in self.extensions:
self.build_extension(ext)
    def build_extension(self, ext):
        """Compile and link a single Extension into its shared-object file.

        The build is skipped when all dependencies are older than the
        existing output (unless --force).  Raises DistutilsSetupError if
        the extension's 'sources' is missing or not a list/tuple."""
        sources = ext.sources
        if sources is None or not isinstance(sources, (list, tuple)):
            raise DistutilsSetupError(
                "in 'ext_modules' option (extension '%s'), "
                "'sources' must be present and must be "
                "a list of source filenames" % ext.name)
        sources = list(sources)

        fullname = self.get_ext_fullname(ext.name)
        if self.inplace:
            # ignore build-lib -- put the compiled extension into
            # the source tree along with pure Python modules
            modpath = fullname.split('.')
            package = '.'.join(modpath[0:-1])
            base = modpath[-1]

            build_py = self.get_finalized_command('build_py')
            package_dir = build_py.get_package_dir(package)
            ext_filename = os.path.join(package_dir,
                                        self.get_ext_filename(base))
        else:
            ext_filename = os.path.join(self.build_lib,
                                        self.get_ext_filename(fullname))
        depends = sources + ext.depends

        if not (self.force or newer_group(depends, ext_filename, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)

        # First, scan the sources for SWIG definition files (.i), run
        # SWIG on 'em to create .c files, and modify the sources list
        # accordingly.
        sources = self.swig_sources(sources, ext)

        # Next, compile the source code to object files.

        # XXX not honouring 'define_macros' or 'undef_macros' -- the
        # CCompiler API needs to change to accommodate this, and I
        # want to do one thing at a time!

        # Two possible sources for extra compiler arguments:
        #   - 'extra_compile_args' in Extension object
        #   - CFLAGS environment variable (not particularly
        #     elegant, but people seem to expect it and I
        #     guess it's useful)
        # The environment variable should take precedence, and
        # any sensible compiler will give precedence to later
        # command line args.  Hence we combine them in order:
        extra_args = ext.extra_compile_args or []

        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            # A 1-tuple marks a macro to undefine (vs (name, value) pairs).
            macros.append((undef,))

        objects = self.compiler.compile(sources,
                                        output_dir=self.build_temp,
                                        macros=macros,
                                        include_dirs=ext.include_dirs,
                                        debug=self.debug,
                                        extra_postargs=extra_args,
                                        depends=ext.depends)

        # XXX -- this is a Vile HACK!
        #
        # The setup.py script for Python on Unix needs to be able to
        # get this list so it can perform all the clean up needed to
        # avoid keeping object files around when cleaning out a failed
        # build of an extension module.  Since Distutils does not
        # track dependencies, we have to get rid of intermediates to
        # ensure all the intermediates will be properly re-built.
        #
        self._built_objects = objects[:]

        # Now link the object files together into a "shared object" --
        # of course, first we have to figure out all the other things
        # that go into the mix.
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []

        # Detect target language, if not provided
        language = ext.language or self.compiler.detect_language(sources)

        self.compiler.link_shared_object(
            objects, ext_filename,
            libraries=self.get_libraries(ext),
            library_dirs=ext.library_dirs,
            runtime_library_dirs=ext.runtime_library_dirs,
            extra_postargs=extra_args,
            export_symbols=self.get_export_symbols(ext),
            debug=self.debug,
            build_temp=self.build_temp,
            target_lang=language)
    def swig_sources(self, sources, extension):
        """Walk the list of source files in 'sources', looking for SWIG
        interface (.i) files.  Run SWIG on all that are found, and
        return a modified 'sources' list with SWIG source files replaced
        by the generated C (or C++) files.
        """
        new_sources = []
        swig_sources = []
        # Maps each .i file to the wrapper source it will generate.
        swig_targets = {}

        # XXX this drops generated C/C++ files into the source tree, which
        # is fine for developers who want to distribute the generated
        # source -- but there should be an option to put SWIG output in
        # the temp dir.

        if self.swig_cpp:
            log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")

        if self.swig_cpp or ('-c++' in self.swig_opts) or \
           ('-c++' in extension.swig_opts):
            target_ext = '.cpp'
        else:
            target_ext = '.c'

        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == ".i":             # SWIG interface file
                new_sources.append(base + '_wrap' + target_ext)
                swig_sources.append(source)
                swig_targets[source] = new_sources[-1]
            else:
                new_sources.append(source)

        if not swig_sources:
            return new_sources

        swig = self.swig or self.find_swig()
        swig_cmd = [swig, "-python"]
        swig_cmd.extend(self.swig_opts)
        if self.swig_cpp:
            swig_cmd.append("-c++")

        # Do not override commandline arguments
        if not self.swig_opts:
            for o in extension.swig_opts:
                swig_cmd.append(o)

        for source in swig_sources:
            target = swig_targets[source]
            log.info("swigging %s to %s", source, target)
            self.spawn(swig_cmd + ["-o", target, source])

        return new_sources
def find_swig(self):
"""Return the name of the SWIG executable. On Unix, this is
just "swig" -- it should be in the PATH. Tries a bit harder on
Windows.
"""
if os.name == "posix":
return "swig"
elif os.name == "nt":
# Look for SWIG in its standard installation directory on
# Windows (or so I presume!). If we find it there, great;
# if not, act like Unix and assume it's in the PATH.
for vers in ("1.3", "1.2", "1.1"):
fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
if os.path.isfile(fn):
return fn
else:
return "swig.exe"
elif os.name == "os2":
# assume swig available in the PATH.
return "swig.exe"
else:
raise DistutilsPlatformError(
"I don't know how to find (much less run) SWIG "
"on platform '%s'" % os.name)
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullname(self, ext_name):
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename(self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = ext_name.split('.')
# OS/2 has an 8 character module (extension) limit :-(
if os.name == "os2":
ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8]
# extensions in debug_mode are named 'module_d.pyd' under windows
so_ext = get_config_var('SO')
if os.name == 'nt' and self.debug:
return os.path.join(*ext_path) + '_d' + so_ext
return os.path.join(*ext_path) + so_ext
def get_export_symbols(self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "PyInit_" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "init" function.
"""
initfunc_name = "PyInit_" + ext.name.split('.')[-1]
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
    def get_libraries(self, ext):
        """Return the list of libraries to link against when building a
        shared extension.  On most platforms, this is just 'ext.libraries';
        on Windows and OS/2, we add the Python library (eg. python20.dll).
        """
        # The python library is always needed on Windows.  For MSVC, this
        # is redundant, since the library is mentioned in a pragma in
        # pyconfig.h that MSVC groks.  The other Windows compilers all seem
        # to need it mentioned explicitly, though, so that's what we do.
        # Append '_d' to the python import library on debug builds.
        # Throughout: sys.hexversion >> 24 is the major version and
        # (sys.hexversion >> 16) & 0xff the minor version.
        if sys.platform == "win32":
            from distutils.msvccompiler import MSVCCompiler
            if not isinstance(self.compiler, MSVCCompiler):
                template = "python%d%d"
                if self.debug:
                    template = template + '_d'
                pythonlib = (template %
                       (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
                # don't extend ext.libraries, it may be shared with other
                # extensions, it is a reference to the original list
                return ext.libraries + [pythonlib]
            else:
                return ext.libraries
        elif sys.platform == "os2emx":
            # EMX/GCC requires the python library explicitly, and I
            # believe VACPP does as well (though not confirmed) - AIM Apr01
            template = "python%d%d"
            # debug versions of the main DLL aren't supported, at least
            # not at this time - AIM Apr01
            #if self.debug:
            #    template = template + '_d'
            pythonlib = (template %
                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib]
        elif sys.platform[:6] == "cygwin":
            template = "python%d.%d"
            pythonlib = (template %
                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib]
        elif sys.platform[:6] == "atheos":
            from distutils import sysconfig

            template = "python%d.%d"
            pythonlib = (template %
                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # Get SHLIBS from Makefile
            extra = []
            for lib in sysconfig.get_config_var('SHLIBS').split():
                if lib.startswith('-l'):
                    extra.append(lib[2:])
                else:
                    extra.append(lib)
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib, "m"] + extra
        elif sys.platform == 'darwin':
            # Don't use the default code below
            return ext.libraries
        else:
            from distutils import sysconfig
            if sysconfig.get_config_var('Py_ENABLE_SHARED'):
                template = "python%d.%d"
                pythonlib = (template %
                             (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
                return ext.libraries + [pythonlib]
            else:
                return ext.libraries
| |
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from netaddr import IPNetwork, IPAddress
from nose.tools import *
from nose_parameterized import parameterized
from mock import patch, Mock
import unittest
import json
from pycalico.block import (AllocationBlock,
BLOCK_SIZE,
NoHostAffinityWarning,
AlreadyAssignedError,
AddressNotAssignedError,
get_block_cidr_for_address)
from etcd import EtcdResult
# Shared test fixtures: a /26 used by the (de)serialization tests, plus one
# IPv4 and one IPv6 allocation block (both 64 addresses) for assignment tests.
network = IPNetwork("192.168.25.0/26")
BLOCK_V4_1 = IPNetwork("10.11.12.0/26")
BLOCK_V6_1 = IPNetwork("2001:abcd:def0::/122")
class TestAllocationBlock(unittest.TestCase):
    """Unit tests for pycalico.block.AllocationBlock.

    Covers construction, JSON (de)serialization, automatic and explicit
    address assignment, release, and the query helpers.
    """

    def test_init_block_id(self):
        """A freshly created block records its host affinity and CIDR and
        has every address free."""
        host = "test_host"
        block = AllocationBlock(network, host)
        assert_equal(block.host_affinity, host)
        assert_equal(block.cidr, network)
        assert_equal(block.count_free_addresses(), BLOCK_SIZE)

    def test_to_json(self):
        """to_json() emits the CIDR, affinity, attributes and allocations,
        and the JSON round-trips through from_etcd_result()."""
        host = "test_host"
        block = AllocationBlock(network, host)

        # Set up an allocation
        attr = {
            AllocationBlock.ATTR_HANDLE_ID: "test_key",
            AllocationBlock.ATTR_SECONDARY: {
                "key1": "value1",
                "key2": "value2"
            }
        }
        block.attributes.append(attr)
        block.allocations[5] = 0
        assert_equal(block.count_free_addresses(), BLOCK_SIZE - 1)

        # Read out the JSON
        json_str = block.to_json()
        json_dict = json.loads(json_str)
        assert_equal(json_dict[AllocationBlock.CIDR], str(network))
        assert_equal(json_dict[AllocationBlock.AFFINITY], "host:test_host")
        assert_dict_equal(json_dict[AllocationBlock.ATTRIBUTES][0],
                          attr)
        expected_allocations = [None] * BLOCK_SIZE
        expected_allocations[5] = 0
        assert_list_equal(json_dict[AllocationBlock.ALLOCATIONS],
                          expected_allocations)

        # Verify we can read the JSON back in.
        result = Mock(spec=EtcdResult)
        result.value = json_str
        block2 = AllocationBlock.from_etcd_result(result)
        assert_equal(block2.to_json(), json_str)

    def test_from_etcd_result(self):
        """from_etcd_result() reconstructs allocations and attributes from
        a stored JSON value, and re-serializes to the same JSON."""
        result = Mock(spec=EtcdResult)

        # Build a JSON object for the Block
        attr0 = {
            AllocationBlock.ATTR_HANDLE_ID: "test_key1",
            AllocationBlock.ATTR_SECONDARY: {
                "key1": "value11",
                "key2": "value21"
            }
        }
        attr1 = {
            AllocationBlock.ATTR_HANDLE_ID: "test_key2",
            AllocationBlock.ATTR_SECONDARY: {
                "key1": "value12",
                "key2": "value22"
            }
        }
        allocations = [None] * BLOCK_SIZE
        allocations[0] = 0
        allocations[1] = 0
        allocations[2] = 1
        json_dict = {
            AllocationBlock.CIDR: str(network),
            AllocationBlock.AFFINITY: "host:Sammy Davis, Jr.",
            AllocationBlock.ALLOCATIONS: allocations,
            AllocationBlock.ATTRIBUTES: [attr0, attr1]
        }
        result.value = json.dumps(json_dict)
        block = AllocationBlock.from_etcd_result(result)
        assert_equal(block.count_free_addresses(), BLOCK_SIZE - 3)
        assert_equal(block.db_result, result)
        assert_equal(block.cidr, network)
        assert_equal(block.host_affinity, "Sammy Davis, Jr.")
        assert_list_equal(block.allocations[:3], [0, 0, 1])
        assert_dict_equal(block.attributes[0], attr0)
        assert_dict_equal(block.attributes[1], attr1)

        # Verify we can get JSON back out.
        json_str = block.to_json()
        assert_equal(result.value, json_str)

    def test_update_result(self):
        """update_result() returns the original etcd result object with its
        value refreshed to the block's current JSON."""
        result = Mock(spec=EtcdResult)

        # Build a JSON object for the Block
        attr0 = {
            AllocationBlock.ATTR_HANDLE_ID: "test_key1",
            AllocationBlock.ATTR_SECONDARY: {
                "key1": "value11",
                "key2": "value21"
            }
        }
        attr1 = {
            AllocationBlock.ATTR_HANDLE_ID: "test_key2",
            AllocationBlock.ATTR_SECONDARY: {
                "key1": "value12",
                "key2": "value22"
            }
        }
        allocations = [None] * BLOCK_SIZE
        allocations[0] = 0
        allocations[1] = 0
        allocations[2] = 1
        json_dict = {
            AllocationBlock.CIDR: str(network),
            AllocationBlock.AFFINITY: "host:Sammy Davis, Jr.",
            AllocationBlock.ALLOCATIONS: allocations,
            AllocationBlock.ATTRIBUTES: [attr0, attr1]
        }
        result.value = json.dumps(json_dict)
        block = AllocationBlock.from_etcd_result(result)

        # Modify the block.
        block.allocations[3] = 1

        # Get the update.  It should be the same result object, but with the
        # value set to the new JSON.
        block_json_str = block.to_json()
        updated = block.update_result()
        assert_equal(updated, result)
        assert_equal(result.value, block_json_str)

        # Verify the update appears in the JSON
        block_json_dict = json.loads(block_json_str)
        json_dict[AllocationBlock.ALLOCATIONS][3] = 1
        assert_dict_equal(block_json_dict, json_dict)

    @patch("pycalico.block.get_hostname", return_value="test_host1")
    def test_auto_assign_v4(self, m_get_hostname):
        """Mainline test of IPv4 auto-assignment: ordering, attribute
        de-duplication, exhaustion, and skipping pre-assigned slots."""
        block0 = _test_block_empty_v4()
        attr = {"key21": "value1", "key22": "value2"}
        ips = block0.auto_assign(1, "key2", attr)
        assert_list_equal([BLOCK_V4_1[0]], ips)
        assert_equal(block0.attributes[0][AllocationBlock.ATTR_HANDLE_ID],
                     "key2")
        assert_dict_equal(block0.attributes[0][AllocationBlock.ATTR_SECONDARY],
                          attr)
        assert_equal(block0.count_free_addresses(), BLOCK_SIZE - 1)

        # Allocate again from the first block, with a different key.
        ips = block0.auto_assign(3, "key3", attr)
        assert_list_equal([BLOCK_V4_1[1],
                           BLOCK_V4_1[2],
                           BLOCK_V4_1[3]], ips)
        assert_equal(block0.attributes[1][AllocationBlock.ATTR_HANDLE_ID],
                     "key3")
        assert_dict_equal(block0.attributes[1][AllocationBlock.ATTR_SECONDARY],
                          attr)
        assert_equal(block0.count_free_addresses(), BLOCK_SIZE - 4)

        # Allocate with different attributes.
        ips = block0.auto_assign(3, "key3", {})
        assert_list_equal([BLOCK_V4_1[4],
                           BLOCK_V4_1[5],
                           BLOCK_V4_1[6]], ips)
        assert_equal(block0.attributes[2][AllocationBlock.ATTR_HANDLE_ID],
                     "key3")
        assert_dict_equal(block0.attributes[2][AllocationBlock.ATTR_SECONDARY],
                          {})
        assert_equal(block0.count_free_addresses(), BLOCK_SIZE - 7)

        # Allocate 3 from a new block.
        block1 = _test_block_empty_v4()
        ips = block1.auto_assign(3, "key2", attr)
        assert_list_equal([BLOCK_V4_1[0],
                           BLOCK_V4_1[1],
                           BLOCK_V4_1[2]], ips)
        assert_equal(block1.count_free_addresses(), BLOCK_SIZE - 3)

        # Allocate again with same keys.
        ips = block1.auto_assign(3, "key2", attr)
        assert_list_equal([BLOCK_V4_1[3],
                           BLOCK_V4_1[4],
                           BLOCK_V4_1[5]], ips)
        assert_equal(block1.count_free_addresses(), BLOCK_SIZE - 6)

        # Assert we didn't create another attribute entry.
        assert_equal(len(block1.attributes), 1)

        # Test allocating 0 IPs with a new key.
        ips = block1.auto_assign(0, "key3", attr)
        assert_list_equal(ips, [])
        assert_equal(len(block1.attributes), 1)
        assert_equal(block1.count_free_addresses(), BLOCK_SIZE - 6)

        # Allocate addresses, so the block is nearly full
        ips = block1.auto_assign(BLOCK_SIZE - 8, None, {})
        assert_equal(len(ips), BLOCK_SIZE - 8)
        assert_equal(block1.count_free_addresses(), 2)

        # Allocate 4 addresses.  Only 2 addresses left.
        ips = block1.auto_assign(4, None, {})
        assert_list_equal([BLOCK_V4_1[-2],
                           BLOCK_V4_1[-1]], ips)
        assert_equal(block1.count_free_addresses(), 0)

        # Block is now full, further attempts return no addresses
        ips = block1.auto_assign(4, None, {})
        assert_list_equal([], ips)

        # Test that we can cope with already allocated addresses that aren't
        # sequential.
        block2 = _test_block_not_empty_v4()
        ips = block2.auto_assign(4, None, {})
        assert_list_equal([BLOCK_V4_1[0],
                           BLOCK_V4_1[1],
                           BLOCK_V4_1[3],
                           BLOCK_V4_1[5]], ips)
        assert_equal(block2.count_free_addresses(), BLOCK_SIZE - 6)

    @patch("pycalico.block.get_hostname", return_value="test_host1")
    def test_auto_assign_v6(self, m_get_hostname):
        """Mainline test of IPv6 auto-assignment, mirroring the v4 test,
        plus ordinal arithmetic on small IPv6 addresses."""
        block0 = _test_block_empty_v6()
        attr = {"key21": "value1", "key22": "value2"}
        ips = block0.auto_assign(1, "key2", attr)
        assert_list_equal([BLOCK_V6_1[0]], ips)
        assert_equal(block0.attributes[0][AllocationBlock.ATTR_HANDLE_ID],
                     "key2")
        assert_dict_equal(block0.attributes[0][AllocationBlock.ATTR_SECONDARY],
                          attr)
        assert_equal(block0.count_free_addresses(), BLOCK_SIZE - 1)

        # Allocate again from the first block, with a different key.
        ips = block0.auto_assign(3, "key3", attr)
        assert_list_equal([BLOCK_V6_1[1],
                           BLOCK_V6_1[2],
                           BLOCK_V6_1[3]], ips)
        assert_equal(block0.attributes[1][AllocationBlock.ATTR_HANDLE_ID],
                     "key3")
        assert_dict_equal(block0.attributes[1][AllocationBlock.ATTR_SECONDARY],
                          attr)
        assert_equal(block0.count_free_addresses(), BLOCK_SIZE - 4)

        # Allocate with different attributes.
        ips = block0.auto_assign(3, "key3", {})
        assert_list_equal([BLOCK_V6_1[4],
                           BLOCK_V6_1[5],
                           BLOCK_V6_1[6]], ips)
        assert_equal(block0.attributes[2][AllocationBlock.ATTR_HANDLE_ID],
                     "key3")
        assert_dict_equal(block0.attributes[2][AllocationBlock.ATTR_SECONDARY],
                          {})
        assert_equal(block0.count_free_addresses(), BLOCK_SIZE - 7)

        # Allocate 3 from a new block.
        block1 = _test_block_empty_v6()
        ips = block1.auto_assign(3, "key2", attr)
        assert_list_equal([BLOCK_V6_1[0],
                           BLOCK_V6_1[1],
                           BLOCK_V6_1[2]], ips)
        assert_equal(block1.count_free_addresses(), BLOCK_SIZE - 3)

        # Allocate again with same keys.
        ips = block1.auto_assign(3, "key2", attr)
        assert_list_equal([BLOCK_V6_1[3],
                           BLOCK_V6_1[4],
                           BLOCK_V6_1[5]], ips)
        assert_equal(block1.count_free_addresses(), BLOCK_SIZE - 6)

        # Assert we didn't create another attribute entry.
        assert_equal(len(block1.attributes), 1)

        # Test allocating 0 IPs with a new key.
        ips = block1.auto_assign(0, "key3", attr)
        assert_list_equal(ips, [])
        assert_equal(len(block1.attributes), 1)
        assert_equal(block1.count_free_addresses(), BLOCK_SIZE - 6)

        # Allocate addresses, so the block is nearly full
        ips = block1.auto_assign(BLOCK_SIZE - 8, None, {})
        assert_equal(len(ips), BLOCK_SIZE - 8)
        assert_equal(block1.count_free_addresses(), 2)

        # Allocate 4 addresses.  248+3+3 = 254, so only 2 addresses left
        ips = block1.auto_assign(4, None, {})
        assert_list_equal([BLOCK_V6_1[-2],
                           BLOCK_V6_1[-1]], ips)
        assert_equal(block1.count_free_addresses(), 0)

        # Block is now full, further attempts return no addresses
        ips = block1.auto_assign(4, None, {})
        assert_list_equal([], ips)

        # Test that we can cope with already allocated addresses that aren't
        # sequential.
        block2 = _test_block_not_empty_v6()
        assert_equal(block2.count_free_addresses(), BLOCK_SIZE - 2)
        ips = block2.auto_assign(4, None, {})
        assert_list_equal([BLOCK_V6_1[0],
                           BLOCK_V6_1[1],
                           BLOCK_V6_1[3],
                           BLOCK_V6_1[5]], ips)
        assert_equal(block2.count_free_addresses(), BLOCK_SIZE - 6)

        # Test ordinal math still works for small IPv6 addresses
        sm_cidr = IPNetwork("::1234:5600/122")
        block3 = AllocationBlock(sm_cidr, "test_host1")
        ips = block3.auto_assign(4, None, {})
        assert_list_equal([sm_cidr[0],
                           sm_cidr[1],
                           sm_cidr[2],
                           sm_cidr[3]], ips)
        assert_equal(block3.count_free_addresses(), BLOCK_SIZE - 4)

    @patch("pycalico.block.get_hostname", return_value="not_the_right_host")
    def test_auto_assign_wrong_host(self, m_get_hostname):
        """auto_assign() on a block affine to another host raises unless
        the affinity check is explicitly disabled."""
        block0 = _test_block_empty_v4()
        assert_raises(NoHostAffinityWarning, block0.auto_assign, 1, None, {})

        # Disable the check.
        ips = block0.auto_assign(1, None, {}, affinity_check=False)
        assert_list_equal([BLOCK_V4_1[0]], ips)

    def test_assign_v4(self):
        """Explicitly assigning an already-assigned IPv4 address raises."""
        block0 = _test_block_empty_v4()
        ip0 = BLOCK_V4_1[2]
        attr = {"key21": "value1", "key22": "value2"}
        block0.assign(ip0, "key0", attr)

        # Try to assign the same address again.
        assert_raises(AlreadyAssignedError, block0.assign, ip0, "key0", attr)

    def test_assign_v6(self):
        """Explicitly assigning an already-assigned IPv6 address raises."""
        block0 = _test_block_empty_v6()
        ip0 = BLOCK_V6_1[2]
        attr = {"key21": "value1", "key22": "value2"}
        block0.assign(ip0, "key0", attr)

        # Try to assign the same address again.
        assert_raises(AlreadyAssignedError, block0.assign, ip0, "key0", attr)

    @patch("pycalico.block.get_hostname", return_value="test_host1")
    def test_release_v4(self, m_get_hostname):
        """
        Mainline test of releasing addresses from a block
        """
        block0 = _test_block_not_empty_v4()
        ip = BLOCK_V4_1[13]
        block0.assign(ip, None, {})
        (err, handles) = block0.release({ip})
        assert_set_equal(err, set())
        assert_is_none(block0.allocations[13])
        assert_equal(len(block0.attributes), 1)

        # New assignments with different attrs, increases number of attrs to 2
        ips0 = block0.auto_assign(5, "test_key", {"test": "value"})
        ips1 = block0.auto_assign(5, "test_key", {"test": "value"})
        assert_equal(len(block0.attributes), 2)

        # Release half, still 2 unique attrs
        (err, handles) = block0.release(set(ips0))
        assert_set_equal(err, set())
        assert_equal(len(block0.attributes), 2)

        # Reassign 5, should be the same 5 just released.
        ips2 = block0.auto_assign(5, "test_key", {"test": "value"})
        assert_list_equal(ips2, ips0)
        assert_equal(len(block0.attributes), 2)

        # Assign additional addresses with new key, 3 attrs stored.
        ips3 = block0.auto_assign(2, "test_key2", {})
        assert_equal(len(block0.attributes), 3)
        assert_equal(block0.allocations[11], 1)
        assert_equal(block0.allocations[12], 2)

        # Release all IPs with 2nd set of attrs, reduced to 2 and renumbered.
        (err, handles) = block0.release(set(ips2 + ips1))
        assert_set_equal(err, set())
        assert_equal(len(block0.attributes), 2)
        assert_equal(block0.allocations[11], None)
        assert_equal(block0.allocations[12], 1)

        # Check that release with already released IP returns the bad IP, but
        # releases the others.
        bad_ips = {BLOCK_V4_1[0]}
        (err, handles) = block0.release(set(ips3).union(bad_ips))
        assert_set_equal(err, bad_ips)
        assert_equal(block0.allocations[12], None)
        assert_equal(block0.allocations[13], None)

    @patch("pycalico.block.get_hostname", return_value="test_host1")
    def test_release_v6(self, m_get_hostname):
        """
        Mainline test of releasing addresses from a block
        """
        block0 = _test_block_not_empty_v6()
        # BUGFIX: mirror the v4 test by using ordinal 13.  The previous
        # literal IPAddress("2001:abcd:def0::13") is ordinal 0x13 == 19,
        # which made the allocations[13] assertion below vacuous.
        ip = BLOCK_V6_1[13]
        block0.assign(ip, None, {})
        (err, handles) = block0.release({ip})
        assert_set_equal(err, set())
        assert_is_none(block0.allocations[13])
        assert_equal(len(block0.attributes), 1)

        # New assignments with different attrs, increases number of attrs to 2
        ips0 = block0.auto_assign(5, "test_key", {"test": "value"})
        ips1 = block0.auto_assign(5, "test_key", {"test": "value"})
        assert_equal(len(block0.attributes), 2)

        # Release half, still 2 unique attrs
        (err, handles) = block0.release(set(ips0))
        assert_set_equal(err, set())
        assert_equal(len(block0.attributes), 2)

        # Reassign 5, should be the same 5 just released.
        ips2 = block0.auto_assign(5, "test_key", {"test": "value"})
        assert_list_equal(ips2, ips0)
        assert_equal(len(block0.attributes), 2)

        # Assign additional addresses with new key, 3 attrs stored.
        ips3 = block0.auto_assign(2, "test_key2", {})
        assert_equal(len(block0.attributes), 3)
        assert_equal(block0.allocations[11], 1)
        assert_equal(block0.allocations[12], 2)

        # Release all IPs with 2nd set of attrs, reduced to 2 and renumbered.
        (err, handles) = block0.release(set(ips2 + ips1))
        assert_set_equal(err, set())
        assert_equal(len(block0.attributes), 2)
        assert_equal(block0.allocations[11], None)
        assert_equal(block0.allocations[12], 1)

        # Check that release with already released IP returns the bad IP, but
        # releases the others.
        bad_ips = {IPAddress("2001:abcd:def0::")}
        (err, handles) = block0.release(set(ips3).union(bad_ips))
        assert_set_equal(err, bad_ips)
        assert_equal(block0.allocations[12], None)
        assert_equal(block0.allocations[13], None)

    def test_get_ip_assignments_by_handle(self):
        """
        Mainline test for get_ip_assignments_by_handle()
        """
        block0 = _test_block_not_empty_v4()
        ips = block0.get_ip_assignments_by_handle("key1")
        assert_list_equal(ips, [IPAddress("10.11.12.2"),
                                IPAddress("10.11.12.4")])

        ip0 = IPAddress("10.11.12.56")
        block0.assign(ip0, None, {})
        ips = block0.get_ip_assignments_by_handle(None)
        assert_list_equal(ips, [ip0])

        ips = block0.get_ip_assignments_by_handle("this_handle_doesnt_exist")
        assert_list_equal(ips, [])

    def test_get_attributes_for_ip(self):
        """
        Mainline test for get_attributes_for_ip()
        """
        block0 = _test_block_not_empty_v4()
        (handle, attrs) = block0.get_attributes_for_ip(IPAddress("10.11.12.2"))
        assert_equal(handle, "key1")
        assert_dict_equal(attrs, {"key21": "value1", "key22": "value2"})

        ip0 = IPAddress("10.11.12.56")
        attr0 = {"a": 1, "b": 2, "c": 3}
        handle0 = "key0"
        block0.assign(ip0, handle0, attr0)
        (handle, attr) = block0.get_attributes_for_ip(ip0)
        assert_equal(handle, handle0)
        assert_dict_equal(attr, attr0)

        # An address that was never assigned raises.
        ip1 = IPAddress("10.11.12.57")
        assert_raises(AddressNotAssignedError,
                      block0.get_attributes_for_ip, ip1)
class TestGetBlockCIDRForAddress(unittest.TestCase):
    """Parameterized checks for get_block_cidr_for_address()."""

    @parameterized.expand([
        (IPAddress("192.168.3.7"),
         IPNetwork("192.168.3.0/26")),
        (IPAddress("10.34.11.75"),
         IPNetwork("10.34.11.64/26")),
        (IPAddress("2001:abee:beef::1234"),
         IPNetwork("2001:abee:beef::1200/122")),
        (IPAddress("2001:abee:beef::"),
         IPNetwork("2001:abee:beef::/122")),
    ])
    def test_get_block_cidr(self, address, cidr):
        """
        Test get_block_cidr_for_address
        """
        assert_equal(get_block_cidr_for_address(address), cidr)
def _test_block_empty_v4():
    """Return a fresh, fully unallocated IPv4 test block."""
    return AllocationBlock(BLOCK_V4_1, "test_host1")
def _test_block_not_empty_v4():
    """Return an IPv4 test block with ordinals 2 and 4 pre-assigned to
    handle "key1" (attribute index 0)."""
    block = _test_block_empty_v4()
    block.attributes.append({
        AllocationBlock.ATTR_HANDLE_ID: "key1",
        AllocationBlock.ATTR_SECONDARY: {"key21": "value1",
                                         "key22": "value2"},
    })
    for ordinal in (2, 4):
        block.allocations[ordinal] = 0
    return block
def _test_block_empty_v6():
    """Return a fresh, fully unallocated IPv6 test block."""
    return AllocationBlock(BLOCK_V6_1, "test_host1")
def _test_block_not_empty_v6():
    """Return an IPv6 test block with ordinals 2 and 4 pre-assigned to
    handle "key1" (attribute index 0)."""
    block = _test_block_empty_v6()
    block.attributes.append({
        AllocationBlock.ATTR_HANDLE_ID: "key1",
        AllocationBlock.ATTR_SECONDARY: {"key21": "value1",
                                         "key22": "value2"},
    })
    for ordinal in (2, 4):
        block.allocations[ordinal] = 0
    return block
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
    """Base class for RPC tests.

    Subclasses override run_test() (and optionally add_options(),
    setup_chain(), setup_network()); main() drives option parsing,
    directory/node setup, test execution, shutdown and cleanup.
    """

    def __init__(self):
        self.num_nodes = 4
        self.setup_clean_chain = False
        self.nodes = None

    def run_test(self):
        # Subclasses must implement the actual test logic.
        raise NotImplementedError

    def add_options(self, parser):
        # Hook for subclasses to register extra command-line options.
        pass

    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        if self.setup_clean_chain:
            initialize_chain_clean(self.options.tmpdir, self.num_nodes)
        else:
            initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)

    def stop_node(self, num_node):
        stop_node(self.nodes[num_node], num_node)

    def setup_nodes(self):
        return start_nodes(self.num_nodes, self.options.tmpdir)

    def setup_network(self, split = False):
        self.nodes = self.setup_nodes()

        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        #
        # If we joined network halves, connect the nodes from the joint
        # on outward.  This ensures that chains are properly reorganised.
        if not split:
            connect_nodes_bi(self.nodes, 1, 2)
            sync_blocks(self.nodes[1:3])
            sync_mempools(self.nodes[1:3])

        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 2, 3)
        self.is_network_split = split
        self.sync_all()

    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        assert not self.is_network_split
        stop_nodes(self.nodes)
        self.setup_network(True)

    def sync_all(self):
        # Sync each network half separately when split, else everything.
        if self.is_network_split:
            sync_blocks(self.nodes[:2])
            sync_blocks(self.nodes[2:])
            sync_mempools(self.nodes[:2])
            sync_mempools(self.nodes[2:])
        else:
            sync_blocks(self.nodes)
            sync_mempools(self.nodes)

    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        assert self.is_network_split
        stop_nodes(self.nodes)
        self.setup_network(False)

    def main(self):
        """Parse options, run the test, then shut down and clean up."""
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave smartcoinds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop smartcoinds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                          help="Source directory containing smartcoind/smartcoin-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                          help="Root directory for datadirs")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        # backup dir variable for removal at cleanup
        self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)

        if self.options.trace_rpc:
            logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)

        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)

        PortSeed.n = self.options.port_seed

        os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']

        check_json_precision()

        success = False
        try:
            os.makedirs(self.options.tmpdir, exist_ok=False)
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = True
        except JSONRPCException as e:
            print("JSONRPC error: "+e.error['message'])
            traceback.print_tb(sys.exc_info()[2])
        except AssertionError as e:
            print("Assertion failed: " + str(e))
            traceback.print_tb(sys.exc_info()[2])
        except KeyError as e:
            print("key not found: "+ str(e))
            traceback.print_tb(sys.exc_info()[2])
        except Exception as e:
            print("Unexpected exception caught during testing: " + repr(e))
            traceback.print_tb(sys.exc_info()[2])
        except KeyboardInterrupt as e:
            print("Exiting after " + repr(e))

        if not self.options.noshutdown:
            print("Stopping nodes")
            # BUGFIX: setup may have failed before any node was started,
            # leaving self.nodes == None; calling stop_nodes(None) would
            # raise here and mask the original error.
            if self.nodes is not None:
                stop_nodes(self.nodes)
        else:
            print("Note: smartcoinds were not stopped and may still be running")

        if not self.options.nocleanup and not self.options.noshutdown and success:
            print("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
            if not os.listdir(self.options.root):
                os.rmdir(self.options.root)
        else:
            print("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for f in filenames:
                    print("From", f, ":")
                    from collections import deque
                    print("".join(deque(open(f), MAX_LINES_TO_PRINT)))

        if success:
            print("Tests successful")
            sys.exit(0)
        else:
            print("Failed")
            sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
    """Framework for p2p comparison testing: node 0 runs the binary under
    test, any remaining nodes run a reference binary."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        # Both binaries default to the SMARTCOIND env var (or "smartcoind").
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("SMARTCOIND", "smartcoind"),
                          help="smartcoind binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("SMARTCOIND", "smartcoind"),
                          help="smartcoind binary to use for reference nodes (if any)")

    def setup_network(self):
        per_node_args = [['-debug', '-whitelist=127.0.0.1']] * self.num_nodes
        binaries = [self.options.testbinary]
        binaries += [self.options.refbinary] * (self.num_nodes - 1)
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                                 extra_args=per_node_args,
                                 binary=binaries)
| |
#!/usr/bin/env python3
# -*- coding : utf-8 -*-
# PyAX-12
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module contain unit tests for the "Packet" class.
"""
import pyax12.packet as pk
import unittest
class TestPacket(unittest.TestCase):
"""
Contains unit tests for the "packet" module.
"""
# Tests for the compute_checksum function ###############################
def test_checksum_func_incomplete(self):
"""Check that the compute_checksum function fails when the
"byte_seq" argument is incomplete (len(byte_seq) < 3)."""
byte_seq = (0x01, 0x02) # incomplete packet
with self.assertRaises(ValueError):
pk.compute_checksum(byte_seq)
def test_checksum_func_wrong_arg_type(self):
"""Check that the compute_checksum function fails when the
"byte_seq" argument has a wrong type."""
# Check with None
byte_seq = None # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
# Check with an integer
byte_seq = 0 # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
# Check with an integer
byte_seq = 1 # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
# Check with an integer
byte_seq = 3 # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
# Check with a float
byte_seq = 1.0 # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
# Check with a string
byte_seq = "hello" # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
def test_checksum_func_good_arg_type(self):
"""Check the "compute_checksum" function using the example 2 of the
Dynamixel user guide: "Reading the internal temperature of the
Dynamixel actuator with an ID of 1" (p.20)."""
# Check with a tuple
byte_seq = (0x01, 0x04, 0x02, 0x2b, 0x01)
checksum_byte = pk.compute_checksum(byte_seq)
expected_checksum_byte = 0xcc
self.assertEqual(checksum_byte, expected_checksum_byte)
# Check with a list
byte_seq = [0x01, 0x04, 0x02, 0x2b, 0x01]
checksum_byte = pk.compute_checksum(byte_seq)
expected_checksum_byte = 0xcc
self.assertEqual(checksum_byte, expected_checksum_byte)
# Check with a bytes string
byte_seq = bytes((0x01, 0x04, 0x02, 0x2b, 0x01))
checksum_byte = pk.compute_checksum(byte_seq)
expected_checksum_byte = 0xcc
self.assertEqual(checksum_byte, expected_checksum_byte)
# Check with a bytearray
byte_seq = bytearray((0x01, 0x04, 0x02, 0x2b, 0x01))
checksum_byte = pk.compute_checksum(byte_seq)
expected_checksum_byte = 0xcc
self.assertEqual(checksum_byte, expected_checksum_byte)
def test_checksum_func_wrong_byte_type(self):
"""Check that the compute_checksum function fails when an item of the
"byte_seq" argument has a wrong type (float)."""
# Check with None
byte_seq = (0x01, None, 0x02, 0x2b, 0x01) # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
# Check with float
byte_seq = (0x01, 1.0, 0x02, 0x2b, 0x01) # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
# Check with string
byte_seq = (0x01, "hi", 0x02, 0x2b, 0x01) # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
# Check with tuple
byte_seq = (0x01, (), 0x02, 0x2b, 0x01) # wrong type
with self.assertRaises(TypeError):
pk.compute_checksum(byte_seq)
def test_checksum_func_wrong_byte_value(self):
"""Check that the compute_checksum function fails when an item of the
"byte_seq" argument has a wrong value (too low or too high)."""
# Too low value
byte_seq = (0x01, -1, 0x02, 0x2b, 0x01) # wrong value
with self.assertRaises(ValueError):
pk.compute_checksum(byte_seq)
# Too high value
byte_seq = (0x01, 0xffff, 0x02, 0x2b, 0x01) # wrong value
with self.assertRaises(ValueError):
pk.compute_checksum(byte_seq)
def test_checksum_func_wrong_id_byte(self):
"""Check that the compute_checksum function fails when the "id" byte
of the "byte_seq" argument has a wrong value (too high value)."""
byte_seq = (0xff,) # wrong id
byte_seq += (4,) # length
byte_seq += (0x02, 0x2b, 0x01) # read the temperature of the dynamixel
with self.assertRaises(ValueError):
pk.compute_checksum(byte_seq)
def test_checksum_func_wrong_length_byte(self):
"""Check that the compute_checksum function fails when the "length"
byte of the "byte_seq" argument has a wrong value (too low or too
high).
"""
# Too low value
byte_seq = (1,) # id
byte_seq += (1,) # wrong length
byte_seq += (0x02, 0x2b, 0x01) # read the temperature of the dynamixel
with self.assertRaises(ValueError):
pk.compute_checksum(byte_seq)
# Too high value
byte_seq = (1,) # id
byte_seq += (9,) # wrong length
byte_seq += (0x02, 0x2b, 0x01) # read the temperature of the dynamixel
with self.assertRaises(ValueError):
pk.compute_checksum(byte_seq)
def test_checksum_func_example1(self):
"""Check the "compute_checksum" function using the example 2 of the
Dynamixel user guide: "Reading the internal temperature of the
Dynamixel actuator with an ID of 1" (p.20)."""
byte_seq = (1,) # id
byte_seq += (4,) # length
byte_seq += (0x02, 0x2b, 0x01) # read the temperature of the dynamixel
checksum_byte = pk.compute_checksum(byte_seq)
expected_checksum_byte = 0xcc
self.assertEqual(checksum_byte, expected_checksum_byte)
# Tests for the Packet class ##############################################
# Test the "dynamixel_id" argument
def test_wrong_id_type(self):
"""Check that the instanciation of Packet fails when the argument
"dynamixel_id" has a wrong type."""
# Check with None
dynamixel_id = None # wrong id
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
# Check with float
dynamixel_id = 1.0 # wrong id
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
# Check with string
dynamixel_id = "hi" # wrong id
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
# Check with tuple
dynamixel_id = () # wrong id
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
def test_wrong_id_value(self):
"""Check that the instanciation of Packet fails when the argument
"dynamixel_id" has a wrong value (too low or too high)."""
# Too low
dynamixel_id = -1 # wrong id
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
with self.assertRaises(ValueError):
pk.Packet(dynamixel_id, data)
# Too high
dynamixel_id = 1000 # wrong id
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
with self.assertRaises(ValueError):
pk.Packet(dynamixel_id, data)
# Test the "data" argument
def test_wrong_data_type(self):
"""Check that the instanciation of Packet fails when the argument
"data" has a wrong type."""
# Check with None
dynamixel_id = 1
data = None # wrong type
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
# Check with a float
dynamixel_id = 1
data = 1.0 # wrong type
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
# Check with a string
dynamixel_id = 1
data = "hello" # wrong type
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
def test_good_data_type(self):
"""Check that the instanciation of Packet doesn't fail when the
argument "data" has a right type."""
# Check with a tuple
dynamixel_id = 1
data = (0x02, 0x2b, 0x01)
try:
pk.Packet(dynamixel_id, data)
except (TypeError, ValueError):
self.fail("Encountered an unexpected exception.")
# Check with a list
dynamixel_id = 1
data = [0x02, 0x2b, 0x01]
try:
pk.Packet(dynamixel_id, data)
except (TypeError, ValueError):
self.fail("Encountered an unexpected exception.")
# Check with a bytes string
dynamixel_id = 1
data = bytes((0x02, 0x2b, 0x01))
try:
pk.Packet(dynamixel_id, data)
except (TypeError, ValueError):
self.fail("Encountered an unexpected exception.")
# Check with a bytearray
dynamixel_id = 1
data = bytearray((0x02, 0x2b, 0x01))
try:
pk.Packet(dynamixel_id, data)
except (TypeError, ValueError):
self.fail("Encountered an unexpected exception.")
# Check with an integer
dynamixel_id = 1
data = 0x01 # Ping packet
try:
pk.Packet(dynamixel_id, data)
except (TypeError, ValueError):
self.fail("Encountered an unexpected exception.")
def test_wrong_data_items_type(self):
"""Check that the instanciation of Packet fails when the "data"
items type is wrong."""
# Check with None
dynamixel_id = 1
data = (0x02, 0x2b, None) # wrong item type
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
# Check with float
dynamixel_id = 1
data = (0x02, 0x2b, 1.0) # wrong item type
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
# Check with string
dynamixel_id = 1
data = (0x02, 0x2b, "hi") # wrong item type
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
# Check with tuple
dynamixel_id = 1
data = (0x02, 0x2b, ()) # wrong item type
with self.assertRaises(TypeError):
pk.Packet(dynamixel_id, data)
def test_wrong_data_items_value(self):
"""Check that the instanciation of Packet fails when the "data"
items value is wrong (too high or too low)."""
# Too high value
dynamixel_id = 1
data = (0x02, 0x2b, 0xffff) # wrong value
with self.assertRaises(ValueError):
pk.Packet(dynamixel_id, data)
# Too low value
dynamixel_id = 1
data = (0x02, 0x2b, -1) # wrong value
with self.assertRaises(ValueError):
pk.Packet(dynamixel_id, data)
###
def test_to_integer_tuple_func(self):
"""Check the "to_integer_tuple()" function.
Based on the Dynamixel user guide, example 2: "Reading the internal
temperature of the Dynamixel actuator with an ID of 1" (p.20).
"""
dynamixel_id = 1
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
raw_packet = pk.Packet(dynamixel_id, data)
expected_str = (0xff, 0xff, 0x01, 0x04, 0x02, 0x2b, 0x01, 0xcc)
self.assertEqual(raw_packet.to_integer_tuple(), expected_str)
def test_to_printable_string_func(self):
"""Check the "to_printable_string()" function.
Based on the Dynamixel user guide, example 2: "Reading the internal
temperature of the Dynamixel actuator with an ID of 1" (p.20).
"""
dynamixel_id = 1
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
raw_packet = pk.Packet(dynamixel_id, data)
expected_str = "ff ff 01 04 02 2b 01 cc"
self.assertEqual(raw_packet.to_printable_string(), expected_str)
def test_to_byte_array_func(self):
"""Check the "to_byte_array()" function.
Based on the Dynamixel user guide, example 2: "Reading the internal
temperature of the Dynamixel actuator with an ID of 1" (p.20).
"""
dynamixel_id = 1
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
raw_packet = pk.Packet(dynamixel_id, data)
expected_str = bytearray(b'\xff\xff\x01\x04\x02\x2b\x01\xcc')
self.assertEqual(raw_packet.to_byte_array(), expected_str)
def test_to_bytes_func(self):
"""Check the "to_bytes()" function.
Based on the Dynamixel user guide, example 2: "Reading the internal
temperature of the Dynamixel actuator with an ID of 1" (p.20).
"""
dynamixel_id = 1
data = (0x02, 0x2b, 0x01) # read internal temperature of the dynamixel
raw_packet = pk.Packet(dynamixel_id, data)
expected_str = b'\xff\xff\x01\x04\x02\x2b\x01\xcc'
self.assertEqual(raw_packet.to_bytes(), expected_str)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
from chainer.testing import condition
# First product: exercise contiguity / cover_all / groups / nobias at
# float32 only; second product: exercise all dtype combinations with the
# remaining options fixed.
@testing.parameterize(*(testing.product({
    'c_contiguous': [True, False],
    'cover_all': [True, False],
    'x_dtype': [numpy.float32],
    'W_dtype': [numpy.float32],
    'dilate': [1],
    'groups': [1, 2],
    'nobias': [True, False],
}) + testing.product({
    'c_contiguous': [False],
    'cover_all': [False],
    'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'dilate': [1],
    'groups': [1, 2],
    'nobias': [True, False],
})))
@backend.inject_backend_tests(
    ['test_forward', 'test_backward', 'test_double_backward'],
    # ChainerX tests
    testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0'],
    })
    # CPU tests
    + testing.product({
        'use_cuda': [False],
        'use_ideep': ['never', 'always'],
    })
    # GPU tests
    + testing.product([
        [{'use_cuda': True}],
        # Without cuDNN
        testing.product({
            'use_cudnn': ['never'],
        })
        # With cuDNN
        + testing.product({
            'use_cudnn': ['always'],
            'cudnn_deterministic': [True, False],
            'autotune': [True, False],
        })]))
class TestConvolution2DFunction(unittest.TestCase):
    """Forward, backward and double-backward tests for F.convolution_2d
    across the parameterized configurations and injected backends."""
    def setUp(self):
        # Build the inputs (x, W, b), the upstream gradient (gy) and the
        # double-backward seeds (ggx, ggW, ggb) for this configuration.
        batches = 2
        in_channels_a_group = 3
        out_channels_a_group = 2
        in_channels = in_channels_a_group * self.groups
        out_channels = out_channels_a_group * self.groups
        kh, kw = (3, 3)
        self.stride = 2
        # Half-kernel padding, scaled by the dilation factor.
        self.pad = (int(kh / 2) * self.dilate, int(kw / 2) * self.dilate)
        W = numpy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
            (out_channels, in_channels_a_group, kh, kw)).astype(self.W_dtype)
        if self.nobias:
            b = None
        else:
            b = numpy.random.uniform(
                -1, 1, out_channels).astype(self.x_dtype)
        x = numpy.random.uniform(
            -1, 1, (batches, in_channels, 4, 3)).astype(self.x_dtype)
        # cover_all yields one extra output row for this input geometry,
        # so gy has a different spatial shape.
        if self.cover_all:
            gy = numpy.random.uniform(
                -1, 1, (batches, out_channels, 3, 2)).astype(self.x_dtype)
        else:
            gy = numpy.random.uniform(
                -1, 1, (batches, out_channels, 2, 2)).astype(self.x_dtype)
        ggx = numpy.random.uniform(-1, 1, x.shape).astype(
            self.x_dtype)
        ggW = numpy.random.uniform(-1, 1, W.shape).astype(
            self.W_dtype)
        ggb = None if b is None else numpy.random.uniform(
            -1, 1, b.shape).astype(self.x_dtype)
        self.inputs = [x, W, b]
        self.grad_outputs = [gy]
        self.grad_grad_inputs = [ggx, ggW, ggb]
    def forward(self, inputs):
        # Run convolution_2d on Variables built from the raw arrays.
        x, W, b = inputs
        x = chainer.Variable(x)
        W = chainer.Variable(W)
        b = None if b is None else chainer.Variable(b)
        return F.convolution_2d(
            x, W, b, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all, dilate=self.dilate,
            groups=self.groups)
    def check_forward(self, inputs, backend_config):
        # Reference result on plain CPU (iDeep disabled) ...
        with chainer.using_config('use_ideep', 'never'):
            y_expected = self.forward(inputs)
        # ... compared against the backend under test.
        inputs = backend_config.get_array(inputs)
        with backend_config:
            y_actual = self.forward(inputs)
        testing.assert_allclose(
            y_expected.data, y_actual.data, atol=1e-3, rtol=5e-3)
    def test_forward(self, backend_config):
        # TODO(hvy): chainerx does not support fp16 yet
        if backend_config.use_chainerx:
            if (any(x.dtype == numpy.float16
                    for x in self.inputs if x is not None)):
                raise unittest.SkipTest('Not yet supported')
        self.check_forward(self.inputs, backend_config)
    def check_backward(self, inputs, grad_outputs, backend_config):
        # The bias slot is dropped entirely when testing without bias.
        if self.nobias:
            inputs = inputs[:-1]
        inputs = backend_config.get_array(inputs)
        grad_outputs = backend_config.get_array(grad_outputs)
        # Optionally exercise the non-contiguous code paths.
        if not self.c_contiguous:
            inputs = testing.array._as_noncontiguous_array(inputs)
            grad_outputs = testing.array._as_noncontiguous_array(grad_outputs)
        def f(*args):
            return F.convolution_2d(*args, stride=self.stride, pad=self.pad,
                                    cover_all=self.cover_all,
                                    dilate=self.dilate, groups=self.groups)
        with backend_config:
            gradient_check.check_backward(
                f, inputs, grad_outputs, dtype='d', atol=5e-4, rtol=5e-3)
    @condition.retry(3)
    def test_backward(self, backend_config):
        # TODO(hvy): chainerx does not support fp16 yet
        if backend_config.use_chainerx:
            if (any(x.dtype == numpy.float16
                    for x in self.inputs if x is not None)):
                raise unittest.SkipTest('Not yet supported')
        self.check_backward(self.inputs, self.grad_outputs, backend_config)
    def check_double_backward(
            self, inputs, grad_outputs, grad_grad_inputs, backend_config):
        if self.nobias:
            inputs = inputs[:-1]
            grad_grad_inputs = grad_grad_inputs[:-1]
        inputs = backend_config.get_array(inputs)
        grad_outputs = backend_config.get_array(grad_outputs)
        grad_grad_inputs = backend_config.get_array(grad_grad_inputs)
        if not self.c_contiguous:
            inputs = testing.array._as_noncontiguous_array(inputs)
            grad_outputs = testing.array._as_noncontiguous_array(grad_outputs)
            grad_grad_inputs = testing.array._as_noncontiguous_array(
                grad_grad_inputs)
        def f(*args):
            return F.convolution_2d(
                *args, stride=self.stride, pad=self.pad,
                cover_all=self.cover_all, dilate=self.dilate,
                groups=self.groups)
        with backend_config:
            gradient_check.check_double_backward(
                f, inputs, grad_outputs, grad_grad_inputs,
                dtype='d', atol=5e-3, rtol=5e-2)
    @condition.retry(3)
    def test_double_backward(self, backend_config):
        # TODO(hvy): chainerx does not support fp16 yet
        if backend_config.use_chainerx:
            if (any(x.dtype == numpy.float16
                    for x in self.inputs if x is not None)):
                raise unittest.SkipTest('Not yet supported')
        self.check_double_backward(
            self.inputs, self.grad_outputs, self.grad_grad_inputs,
            backend_config)
@testing.parameterize(*(testing.product({
    'use_cudnn': ['always', 'auto', 'never'],
    'cudnn_deterministic': [False, True],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'dilate': [1],
    'groups': [1, 2],
}) + testing.product({
    'use_cudnn': ['always', 'auto', 'never'],
    'cudnn_deterministic': [False],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'dilate': [2],
    'groups': [1, 2],
})))
@attr.cudnn
class TestConvolution2DCudnnCall(unittest.TestCase):
    """Checks that the cuDNN kernels are invoked exactly when the
    configuration says they should be."""
    def setUp(self):
        batches = 2
        in_channels_a_group = 3
        out_channels_a_group = 2
        in_channels = in_channels_a_group * self.groups
        out_channels = out_channels_a_group * self.groups
        kh, kw = (3, 3)
        self.stride = 2
        # Half-kernel padding, scaled by the dilation factor.
        self.pad = (int(kh / 2) * self.dilate, int(kw / 2) * self.dilate)
        self.x = cuda.cupy.random.uniform(
            -1, 1, (batches, in_channels, 4, 3)).astype(self.dtype)
        self.W = cuda.cupy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
            (out_channels, in_channels_a_group, kh, kw)).astype(self.dtype)
        self.gy = cuda.cupy.random.uniform(
            -1, 1, (batches, out_channels, 2, 2)).astype(self.dtype)
        with chainer.using_config('use_cudnn', self.use_cudnn):
            self.should_call_cudnn = chainer.should_use_cudnn('>=auto')
            # Dilated convolution needs cuDNN >= 6, grouped needs >= 7.
            if self.dilate > 1 and cuda.cuda.cudnn.getVersion() < 6000:
                self.should_call_cudnn = False
            if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
                self.should_call_cudnn = False
    def forward(self):
        # Bias-less convolution on the pre-built GPU arrays.
        x = chainer.Variable(self.x)
        W = chainer.Variable(self.W)
        return F.convolution_2d(x, W, None, stride=self.stride, pad=self.pad,
                                dilate=self.dilate, groups=self.groups)
    def test_call_cudnn_forward(self):
        # Patch the cuDNN entry point and verify it is (not) called.
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                with testing.patch('cupy.cudnn.convolution_forward') as func:
                    self.forward()
                    self.assertEqual(func.called, self.should_call_cudnn)
    def test_call_cudnn_backward(self):
        # Same check for the backward-data cuDNN kernel.
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                y = self.forward()
                y.grad = self.gy
                name = 'cupy.cudnn.convolution_backward_data'
                with testing.patch(name) as func:
                    y.backward()
                    self.assertEqual(func.called, self.should_call_cudnn)
@testing.parameterize(*testing.product({
    'c_contiguous': [True, False],
    'nobias': [True, False],
    'groups': [1, 2],
}))
@attr.gpu
@attr.cudnn
class TestConvolution2DFunctionCudnnDeterministic(unittest.TestCase):
    """Tests the cudnn_deterministic config: the deterministic
    backward-filter kernel must be used, and repeated runs must produce
    bit-identical results and gradients."""
    def setUp(self):
        self.stride = 2
        self.pad = 1
        batch_sz = 2
        in_channels_a_group = 64
        out_channels_a_group = 64
        in_channels = in_channels_a_group * self.groups
        out_channels = out_channels_a_group * self.groups
        kh, kw = (3, 3)
        in_h, in_w = (32, 128)
        out_h, out_w = (16, 64)
        # should be same types for cudnn test
        x_dtype = numpy.float32
        W_dtype = numpy.float32
        self.W = numpy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
            (out_channels, in_channels_a_group, kh, kw)).astype(W_dtype)
        self.b = numpy.random.uniform(-1, 1, out_channels).astype(x_dtype)
        self.x = numpy.random.uniform(
            -1, 1, (batch_sz, in_channels, in_h, in_w)).astype(x_dtype)
        self.gy = numpy.random.uniform(
            -1, 1, (batch_sz, out_channels, out_h, out_w)).astype(x_dtype)
        self.should_call_cudnn = True
        # Grouped convolution requires cuDNN >= 7.
        if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
            self.should_call_cudnn = False
    def test_called(self):
        with testing.patch(
                'cupy.cudnn.convolution_backward_filter', autospec=True) as f:
            # cuDNN version >= v3 supports `cudnn_deterministic` option
            self._run()
            # in Convolution2DFunction.backward_gpu()
            assert f.called == self.should_call_cudnn
    def test_cudnn_deterministic(self):
        # Two identical runs must produce identical outputs and grads.
        x1, W1, b1, y1 = self._run()
        x2, W2, b2, y2 = self._run()
        cuda.cupy.testing.assert_array_equal(x1.grad, x2.grad)
        cuda.cupy.testing.assert_array_equal(y1.data, y2.data)
        cuda.cupy.testing.assert_array_equal(W1.grad, W2.grad)
    def _contiguous(self, x_data, W_data, b_data, gy_data):
        # Optionally re-lay-out the arrays so they are NOT C-contiguous.
        if not self.c_contiguous:
            x_data = numpy.asfortranarray(x_data)
            W_data = numpy.asfortranarray(W_data)
            gy_data = numpy.asfortranarray(gy_data)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(gy_data.flags.c_contiguous)
            # 1-D arrays are always contiguous; build a strided view of a
            # doubled buffer to get a non-contiguous bias instead.
            b = numpy.empty((len(b_data) * 2,), dtype=self.b.dtype)
            b[::2] = b_data
            b_data = b[::2]
            self.assertFalse(b_data.flags.c_contiguous)
        return x_data, W_data, b_data, gy_data
    def _run(self):
        with chainer.using_config('use_cudnn', 'always'):
            with chainer.using_config('cudnn_deterministic', True):
                # verify data continuity and move to gpu
                x_data, W_data, b_data, gy_data = tuple(
                    cuda.to_gpu(data) for data in self._contiguous(
                        self.x, self.W, self.b, self.gy))
                x, W, b, y = self._run_forward(x_data, W_data, b_data)
                y.grad = gy_data
                y.backward()
                return x, W, b, y
    def _run_forward(self, x_data, W_data, b_data):
        # Single forward pass; bias is skipped in the nobias setting.
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        b = None if self.nobias else chainer.Variable(b_data)
        y = F.convolution_2d(x, W, b, stride=self.stride, pad=self.pad,
                             cover_all=False, groups=self.groups)
        return x, W, b, y
class TestConvolution2DBackwardNoncontiguousGradOutputs(unittest.TestCase):
    # NumPy raises an error when the inputs of a dot operation are not
    # contiguous; this test ensures that case is handled correctly
    # (https://github.com/chainer/chainer/issues/2744).
    # It depends on backward() of F.sum producing a non-contiguous array.
    def test_1(self):
        n_batches = 2
        in_channels = 3
        out_channels = 1  # important
        xs = numpy.ones((n_batches, in_channels, 10, 10), numpy.float32)
        ws = numpy.ones((out_channels, in_channels, 3, 3), numpy.float32)
        conv_out = F.convolution_2d(xs, chainer.Variable(ws))
        total = F.sum(conv_out)
        total.backward()
testing.run_module(__name__, __file__)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from nova import exception
from nova.openstack.common import jsonutils
def ensure_string_keys(d):
    """Return a copy of dict *d* with every key coerced to ``str``.

    Needed because ``**kwargs`` expansion requires plain string keys
    (http://bugs.python.org/issue4978) while JSON round-trips may
    produce unicode keys.
    """
    # FIX: use .items() instead of the Python-2-only .iteritems() so the
    # helper works unchanged on both Python 2 and Python 3.
    return dict((str(k), v) for k, v in d.items())
# Constants for the 'vif_type' field in VIF class
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_OTHER = 'other'
# Constant for max length of network interface names
# eg 'bridge' in the Network class or 'devname' in
# the VIF class
# NOTE(review): presumably 14 leaves headroom under Linux's 15-char
# interface-name limit for generated suffixes — confirm.
NIC_NAME_LEN = 14
class Model(dict):
    """Defines some necessary structures for most of the network models."""

    def __repr__(self):
        # e.g. "Model({'meta': {}})"
        return '%s(%s)' % (self.__class__.__name__, dict.__repr__(self))

    def _set_meta(self, kwargs):
        # Extract an explicit 'meta' dict if one was given, then fold any
        # remaining keyword arguments into it.
        meta = kwargs.pop('meta', {})
        meta.update(kwargs)
        self['meta'] = meta

    def get_meta(self, key, default=None):
        """calls get(key, default) on self['meta']."""
        return self['meta'].get(key, default)
class IP(Model):
    """Represents an IP address in Nova."""
    def __init__(self, address=None, type=None, **kwargs):
        """Build an IP model.

        :param address: address string (e.g. '192.168.0.1')
        :param type: address type (e.g. 'fixed' or 'floating')

        A 'version' kwarg is honored if given; any other kwargs end up
        in self['meta'].
        """
        super(IP, self).__init__()
        self['address'] = address
        self['type'] = type
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        # determine version from address if not passed in
        if self['address'] and not self['version']:
            try:
                self['version'] = netaddr.IPAddress(self['address']).version
            except netaddr.AddrFormatError:
                # FIX: the caught exception was previously bound to an
                # unused variable; translate it to a Nova-level error.
                raise exception.InvalidIpAddressError(self['address'])

    def __eq__(self, other):
        # Equality considers only the address, not type/version/meta.
        return self['address'] == other['address']

    def is_in_subnet(self, subnet):
        """Return True if this address lies within subnet['cidr']."""
        if self['address'] and subnet['cidr']:
            return (netaddr.IPAddress(self['address']) in
                    netaddr.IPNetwork(subnet['cidr']))
        else:
            return False

    @classmethod
    def hydrate(cls, ip):
        """Rebuild an IP from a plain dict; returns None for falsy input."""
        if ip:
            return IP(**ensure_string_keys(ip))
        return None
class FixedIP(IP):
    """Represents a Fixed IP address in Nova."""

    def __init__(self, floating_ips=None, **kwargs):
        super(FixedIP, self).__init__(**kwargs)
        self['floating_ips'] = floating_ips if floating_ips else []
        # A fixed IP defaults its type when none was supplied.
        if not self['type']:
            self['type'] = 'fixed'

    def add_floating_ip(self, floating_ip):
        # Deduplicate on append.
        if floating_ip not in self['floating_ips']:
            self['floating_ips'].append(floating_ip)

    def floating_ip_addresses(self):
        """Return the bare address of every attached floating IP."""
        return [fip['address'] for fip in self['floating_ips']]

    @classmethod
    def hydrate(cls, fixed_ip):
        """Rebuild a FixedIP (and its floating IPs) from a plain dict."""
        new_ip = FixedIP(**ensure_string_keys(fixed_ip))
        new_ip['floating_ips'] = [IP.hydrate(fip)
                                  for fip in new_ip['floating_ips']]
        return new_ip
class Route(Model):
    """Represents an IP Route in Nova."""

    def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
        super(Route, self).__init__()
        self['cidr'] = cidr
        self['gateway'] = gateway
        self['interface'] = interface
        self._set_meta(kwargs)

    @classmethod
    def hydrate(cls, route):
        """Rebuild a Route from a plain dict, re-wrapping its gateway."""
        new_route = Route(**ensure_string_keys(route))
        new_route['gateway'] = IP.hydrate(new_route['gateway'])
        return new_route
class Subnet(Model):
    """Represents a Subnet in Nova."""

    def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
                 routes=None, **kwargs):
        super(Subnet, self).__init__()
        self['cidr'] = cidr
        self['dns'] = dns if dns else []
        self['gateway'] = gateway
        self['ips'] = ips if ips else []
        self['routes'] = routes if routes else []
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        # Derive the IP version from the cidr when not given explicitly.
        if self['cidr'] and not self['version']:
            self['version'] = netaddr.IPNetwork(self['cidr']).version

    def __eq__(self, other):
        # Two subnets are considered equal when their cidrs match.
        return self['cidr'] == other['cidr']

    def add_route(self, new_route):
        # Deduplicate on append.
        if new_route not in self['routes']:
            self['routes'].append(new_route)

    def add_dns(self, dns):
        # Deduplicate on append.
        if dns not in self['dns']:
            self['dns'].append(dns)

    def add_ip(self, ip):
        # Deduplicate on append.
        if ip not in self['ips']:
            self['ips'].append(ip)

    def as_netaddr(self):
        """Convenience function to get cidr as a netaddr object."""
        return netaddr.IPNetwork(self['cidr'])

    @classmethod
    def hydrate(cls, subnet):
        """Rebuild a Subnet (and its nested models) from a plain dict."""
        new_subnet = Subnet(**ensure_string_keys(subnet))
        new_subnet['dns'] = [IP.hydrate(entry)
                             for entry in new_subnet['dns']]
        new_subnet['ips'] = [FixedIP.hydrate(entry)
                             for entry in new_subnet['ips']]
        new_subnet['routes'] = [Route.hydrate(entry)
                                for entry in new_subnet['routes']]
        new_subnet['gateway'] = IP.hydrate(new_subnet['gateway'])
        return new_subnet
class Network(Model):
    """Represents a Network in Nova."""

    def __init__(self, id=None, bridge=None, label=None,
                 subnets=None, **kwargs):
        super(Network, self).__init__()
        self['id'] = id
        self['bridge'] = bridge
        self['label'] = label
        self['subnets'] = subnets if subnets else []
        self._set_meta(kwargs)

    def add_subnet(self, subnet):
        # Deduplicate on append.
        if subnet not in self['subnets']:
            self['subnets'].append(subnet)

    @classmethod
    def hydrate(cls, network):
        """Rebuild a Network (and its subnets) from a plain dict.

        Falsy input is returned unchanged.
        """
        if not network:
            return network
        new_network = Network(**ensure_string_keys(network))
        new_network['subnets'] = [Subnet.hydrate(subnet)
                                  for subnet in new_network['subnets']]
        return new_network
class VIF8021QbgParams(Model):
    """Represents the parameters for a 802.1qbg VIF."""
    def __init__(self, managerid, typeid, typeidversion, instanceid):
        # FIX(consistency): initialize the base dict like every other
        # Model subclass in this module does (was previously skipped).
        super(VIF8021QbgParams, self).__init__()
        self['managerid'] = managerid
        self['typeid'] = typeid
        self['typeidversion'] = typeidversion
        self['instanceid'] = instanceid
class VIF8021QbhParams(Model):
    """Represents the parameters for a 802.1qbh VIF."""
    def __init__(self, profileid):
        # FIX(consistency): initialize the base dict like every other
        # Model subclass in this module does (was previously skipped).
        super(VIF8021QbhParams, self).__init__()
        self['profileid'] = profileid
class VIF(Model):
    """Represents a Virtual Interface in Nova."""

    def __init__(self, id=None, address=None, network=None, type=None,
                 devname=None, ovs_interfaceid=None,
                 qbh_params=None, qbg_params=None,
                 **kwargs):
        super(VIF, self).__init__()
        self['id'] = id
        self['address'] = address
        self['network'] = network if network else None
        self['type'] = type
        self['devname'] = devname
        self['ovs_interfaceid'] = ovs_interfaceid
        self['qbh_params'] = qbh_params
        self['qbg_params'] = qbg_params
        self._set_meta(kwargs)

    def __eq__(self, other):
        # Two VIFs are considered equal when their ids match.
        return self['id'] == other['id']

    def fixed_ips(self):
        """Return every fixed IP from every subnet of this VIF's network."""
        return [fixed_ip
                for subnet in self['network']['subnets']
                for fixed_ip in subnet['ips']]

    def floating_ips(self):
        """Return every floating IP attached to this VIF's fixed IPs."""
        return [floating_ip
                for fixed_ip in self.fixed_ips()
                for floating_ip in fixed_ip['floating_ips']]

    def labeled_ips(self):
        """Returns the list of all IPs

        The return value looks like this flat structure::

            {'network_label': 'my_network',
             'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
             'ips': [{'address': '123.123.123.123',
                      'version': 4,
                      'type: 'fixed',
                      'meta': {...}},
                     {'address': '124.124.124.124',
                      'version': 4,
                      'type': 'floating',
                      'meta': {...}},
                     {'address': 'fe80::4',
                      'version': 6,
                      'type': 'fixed',
                      'meta': {...}}]
        """
        if not self['network']:
            # No network attached: nothing to report.
            return []
        # Re-wrap fixed IPs as plain IP objects, stripping fields that
        # make no sense in a flat listing.
        flat_ips = [IP(**ensure_string_keys(fixed))
                    for fixed in self.fixed_ips()]
        for flat_ip in flat_ips:
            # remove floating ips from IP, since this is a flat structure
            # of all IPs
            del flat_ip['meta']['floating_ips']
        # add floating ips to list (if any)
        flat_ips.extend(self.floating_ips())
        return {'network_label': self['network']['label'],
                'network_id': self['network']['id'],
                'ips': flat_ips}

    @classmethod
    def hydrate(cls, vif):
        """Rebuild a VIF (and its network) from a plain dict."""
        new_vif = VIF(**ensure_string_keys(vif))
        new_vif['network'] = Network.hydrate(new_vif['network'])
        return new_vif
class NetworkInfo(list):
    """Stores and manipulates network information for a Nova instance."""
    # NetworkInfo is a list of VIFs
    def fixed_ips(self):
        """Returns all fixed_ips without floating_ips attached."""
        return [ip for vif in self for ip in vif.fixed_ips()]
    def floating_ips(self):
        """Returns all floating_ips."""
        return [ip for vif in self for ip in vif.floating_ips()]
    @classmethod
    def hydrate(cls, network_info):
        # Accept either a JSON string or an already-parsed list of dicts.
        # NOTE(review): basestring makes this Python-2-only — confirm
        # before running under Python 3.
        if isinstance(network_info, basestring):
            network_info = jsonutils.loads(network_info)
        return NetworkInfo([VIF.hydrate(vif) for vif in network_info])
    def json(self):
        # Serialize the whole structure back to a JSON string.
        return jsonutils.dumps(self)
    def legacy(self):
        """
        Return the legacy network_info representation of self
        """
        def get_ip(ip):
            # Extract the bare address from an IP model (None-safe).
            if not ip:
                return None
            return ip['address']
        def fixed_ip_dict(ip, subnet):
            if ip['version'] == 4:
                netmask = str(subnet.as_netaddr().netmask)
            else:
                # The legacy v6 format carries the prefix length instead
                # of a dotted netmask.
                netmask = subnet.as_netaddr()._prefixlen
            return {'ip': ip['address'],
                    'enabled': '1',
                    'netmask': netmask,
                    'gateway': get_ip(subnet['gateway'])}
        def convert_routes(routes):
            # Flatten Route models into legacy route dicts.
            routes_list = []
            for route in routes:
                r = {'route': str(netaddr.IPNetwork(route['cidr']).network),
                     'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
                     'gateway': get_ip(route['gateway'])}
                routes_list.append(r)
            return routes_list
        network_info = []
        for vif in self:
            # if vif doesn't have network or that network has no subnets, quit
            if not vif['network'] or not vif['network']['subnets']:
                continue
            network = vif['network']
            # NOTE(jkoelker) The legacy format only supports one subnet per
            #                network, so we only use the 1st one of each type
            # NOTE(tr3buchet): o.O
            # Partition the subnets by IP version.
            v4_subnets = []
            v6_subnets = []
            for subnet in vif['network']['subnets']:
                if subnet['version'] == 4:
                    v4_subnets.append(subnet)
                else:
                    v6_subnets.append(subnet)
            subnet_v4 = None
            subnet_v6 = None
            if v4_subnets:
                subnet_v4 = v4_subnets[0]
            if v6_subnets:
                subnet_v6 = v6_subnets[0]
            if not subnet_v4:
                # NOTE(review): _() is presumably installed as a gettext
                # builtin elsewhere in nova — confirm.
                msg = _('v4 subnets are required for legacy nw_info')
                raise exception.NovaException(message=msg)
            routes = convert_routes(subnet_v4['routes'])
            should_create_bridge = network.get_meta('should_create_bridge',
                                                    False)
            should_create_vlan = network.get_meta('should_create_vlan', False)
            gateway = get_ip(subnet_v4['gateway'])
            # The DHCP server falls back to the v4 gateway address.
            dhcp_server = subnet_v4.get_meta('dhcp_server', gateway)
            network_dict = {
                'bridge': network['bridge'],
                'id': network['id'],
                'cidr': subnet_v4['cidr'],
                'cidr_v6': subnet_v6['cidr'] if subnet_v6 else None,
                'vlan': network.get_meta('vlan'),
                'injected': network.get_meta('injected', False),
                'multi_host': network.get_meta('multi_host', False),
                'bridge_interface': network.get_meta('bridge_interface')
            }
            # NOTE(tr3buchet): 'ips' bit here is tricky, we support a single
            #                  subnet but we want all the IPs to be there
            #                  so use the v4_subnets[0] and its IPs are first
            #                  so that eth0 will be from subnet_v4, the rest of
            #                  the IPs will be aliased eth0:1 etc and the
            #                  gateways from their subnets will not be used
            info_dict = {'label': network['label'],
                         'broadcast': str(subnet_v4.as_netaddr().broadcast),
                         'mac': vif['address'],
                         'vif_type': vif['type'],
                         'vif_devname': vif.get('devname'),
                         'vif_uuid': vif['id'],
                         'ovs_interfaceid': vif.get('ovs_interfaceid'),
                         'qbh_params': vif.get('qbh_params'),
                         'qbg_params': vif.get('qbg_params'),
                         'rxtx_cap': vif.get_meta('rxtx_cap', 0),
                         'dns': [get_ip(ip) for ip in subnet_v4['dns']],
                         'ips': [fixed_ip_dict(ip, subnet)
                                 for subnet in v4_subnets
                                 for ip in subnet['ips']],
                         'should_create_bridge': should_create_bridge,
                         'should_create_vlan': should_create_vlan,
                         'dhcp_server': dhcp_server}
            if routes:
                info_dict['routes'] = routes
            if gateway:
                info_dict['gateway'] = gateway
            if v6_subnets:
                if subnet_v6['gateway']:
                    info_dict['gateway_v6'] = get_ip(subnet_v6['gateway'])
                # NOTE(tr3buchet): only supporting single v6 subnet here
                info_dict['ip6s'] = [fixed_ip_dict(ip, subnet_v6)
                                     for ip in subnet_v6['ips']]
            network_info.append((network_dict, info_dict))
        return network_info
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for binary coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Converts dense array `x` into a SparseTensor of its >= `thresh` entries.

  NOTE: mutates `x` in place — entries below `thresh` are overwritten
  with 0 before the non-zeros are extracted.

  Returns a (SparseTensor, values ndarray) pair.
  """
  x[x < thresh] = 0
  non_zero = np.where(x)
  # np.where returns a tuple of per-dimension index arrays; stack and
  # transpose into the (nnz, ndims) layout SparseTensor expects.
  x_indices = np.vstack(non_zero).astype(index_dtype).T
  x_values = x[non_zero]
  x_shape = x.shape
  return sparse_tensor.SparseTensor(
      indices=x_indices, values=x_values, dense_shape=x_shape), x_values
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
class BinaryOpTest(test.TestCase):
  def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
    """Compares tf_func against np_func on CPU for inputs x and y.

    Also checks that the TF op takes precedence over the numpy operators
    when mixed with raw ndarrays and, optionally, with tf Variables.
    """
    np_ans = np_func(x, y)
    with self.test_session(use_gpu=False):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_cpu = self.evaluate(out)
      # Test that the op takes precedence over numpy operators.
      np_left = tf_func(x, iny).eval()
      np_right = tf_func(inx, y).eval()
      if also_compare_variables:
        var_x = variables.Variable(x)
        var_y = variables.Variable(y)
        variables.global_variables_initializer().run()
        print(type(x), type(y), type(var_x), type(var_y))
        print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
        np_var_left = tf_func(x, var_y).eval()
        np_var_right = tf_func(var_x, y).eval()
    # Object arrays (e.g. strings) don't support allclose comparison.
    if np_ans.dtype != np.object:
      self.assertAllClose(np_ans, tf_cpu)
      self.assertAllClose(np_ans, np_left)
      self.assertAllClose(np_ans, np_right)
      if also_compare_variables:
        self.assertAllClose(np_ans, np_var_left)
        self.assertAllClose(np_ans, np_var_right)
    self.assertShapeEqual(np_ans, out)
_GRAD_TOL = {
dtypes_lib.float16: 1e-3,
dtypes_lib.float32: 1e-3,
dtypes_lib.complex64: 1e-2,
dtypes_lib.float64: 1e-5,
dtypes_lib.complex128: 1e-4
}
  def _compareGradientX(self,
                        x,
                        y,
                        np_func,
                        tf_func,
                        numeric_gradient_type=None):
    """Checks d(tf_func(x, y))/dx via gradient_checker.

    When `numeric_gradient_type` is given, the numeric jacobian is
    recomputed at that dtype (and cast back) — used for low-precision
    inputs where finite differences are too noisy.
    """
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        # NOTE(review): the 1.1 scale presumably exercises a non-unit
        # upstream gradient — confirm against the original TF comment.
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      xs = list(x.shape)
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, xs, out, zs, x_init_value=x)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = tf_func(inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inxf, xs, outf, zs, x_init_value=xf, delta=1e-3)
        jacob_n = jacob_n.astype(x.dtype)
      tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
  def _compareGradientY(self,
                        x,
                        y,
                        np_func,
                        tf_func,
                        numeric_gradient_type=None):
    """Checks tf_func's theoretical Jacobian w.r.t. y against a numeric one.

    Mirror image of _compareGradientX; see that method for details. (The
    higher-precision numeric Jacobian call relies on the gradient checker's
    default delta, which matches the 1e-3 used explicitly in
    _compareGradientX.)
    """
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        # Scale the output so the gradient is not trivial for identity-like
        # ops.
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      ys = list(np.shape(y))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, ys, out, zs, x_init_value=y)
      if numeric_gradient_type is not None:
        # Recompute only the numeric Jacobian in the higher-precision dtype.
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = tf_func(inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inyf, ys, outf, zs, x_init_value=yf)
        jacob_n = jacob_n.astype(x.dtype)
      tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(force_gpu=test_util.is_gpu_available()):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
  def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
    """Runs the CPU comparison, and for floating/complex dtypes also the
    gradient checks and the GPU comparison."""
    self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
    if x.dtype in (np.float16, np.float32, np.float64, np.complex64,
                   np.complex128):
      # floordiv is not differentiable; zeta/polygamma are handled below
      # because they only have gradients w.r.t. the second argument.
      if tf_func not in (_FLOORDIV, math_ops.floordiv, math_ops.zeta,
                         math_ops.polygamma):
        self._compareGradientX(x, y, np_func, tf_func)
        self._compareGradientY(x, y, np_func, tf_func)
      if tf_func in (math_ops.zeta, math_ops.polygamma):
        # These methods only support gradients in the second parameter
        self._compareGradientY(x, y, np_func, tf_func)
      self._compareGpu(x, y, np_func, tf_func)
  def testFloatBasic(self):
    """Element-wise binary ops on float32 tensors, incl. scipy specials."""
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
    self._compareBoth(x, y, np.add, math_ops.add, also_compare_variables=True)
    self._compareBoth(x, y, np.subtract, math_ops.subtract)
    self._compareBoth(x, y, np.multiply, math_ops.multiply)
    # y + 0.1 keeps divisors away from zero.
    self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
    self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
    # Same ops via the Python operator overloads.
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
    self._compareBoth(x, y, np.arctan2, math_ops.atan2)
    x1 = np.random.randn(5, 6).astype(np.float32)
    x2 = np.random.randn(5, 6).astype(np.float32)
    # Remove tiny values--atan2 gradients are flaky near the origin.
    x1[np.abs(x1) < 0.05] = 0.05 * np.sign(x1[np.abs(x1) < 0.05])
    x2[np.abs(x2) < 0.05] = 0.05 * np.sign(x2[np.abs(x2) < 0.05])
    self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
    # The special-function checks are optional: skipped when scipy is absent.
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
      x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
      self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
                        math_ops.igamma)
      self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
                        math_ops.igammac)
      # Need x > 1
      self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta,
                        math_ops.zeta)
      n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
      self._compareBoth(n_small, x_pos_small, special.polygamma,
                        math_ops.polygamma)
    except ImportError as e:
      tf_logging.warn("Cannot test special functions: %s" % str(e))
  def testFloatDifferentShapes(self):
    """Gradients of a broadcasting multiply reduce over broadcast dims."""
    x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
    y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
    with self.cached_session() as sess:
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      s = math_ops.reduce_sum(inx * iny)
      gx, gy = sess.run(gradients_impl.gradients(s, [inx, iny]))
    # gx is simply the broadcasted y
    self.assertAllEqual(gx,
                        np.array([1, 1, 2, 2]).reshape(2, 2).astype(np.float32))
    # gy is x's column summed up
    self.assertAllEqual(gy, np.array([3, 7]).reshape(2, 1).astype(np.float32))
  def testFloatVariableOverload(self):
    """Operator overloads work with a tf.Variable on either side."""
    # NOTE(review): despite the "Float" in the name, this test uses int32
    # data; presumably the dtype is irrelevant to the overload mechanics
    # being tested -- confirm before renaming or changing the dtype.
    x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
    y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
    var_x = variables.Variable(x)
    var_y = variables.Variable(y)
    with self.cached_session() as sess:
      sess.run([var_x.initializer, var_y.initializer])
      left_result = (var_x * y).eval()
      right_result = (x * var_y).eval()
    np_result = x * y
    self.assertAllEqual(np_result, left_result)
    self.assertAllEqual(np_result, right_result)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, math_ops.atan2)
x1 = np.random.randn(7, 4).astype(np.float64)
x2 = np.random.randn(7, 4).astype(np.float64)
# Remove tiny values--atan2 gradients are flaky near the origin.
x1[np.abs(x1) < 0.5] = 0.5 * np.sign(x1[np.abs(x1) < 0.5])
x2[np.abs(x2) < 0.5] = 0.5 * np.sign(x2[np.abs(x2) < 0.5])
self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
math_ops.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
math_ops.igammac)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
  def testUint8Basic(self):
    # Only addition is exercised for uint8.
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint8)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint8)
    self._compareBoth(x, y, np.add, math_ops.add)

  def testInt8Basic(self):
    # Only multiplication (op and overload) is exercised for int8.
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
    self._compareBoth(x, y, np.multiply, math_ops.multiply)
    self._compareBoth(x, y, np.multiply, _MUL)

  def testInt16Basic(self):
    # Only multiplication (op and overload) is exercised for int16.
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
    self._compareBoth(x, y, np.multiply, math_ops.multiply)
    self._compareBoth(x, y, np.multiply, _MUL)

  def testUint16Basic(self):
    # Multiplication and both division flavors for uint16.
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint16)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint16)
    self._compareBoth(x, y, np.multiply, math_ops.multiply)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y, np.true_divide, math_ops.truediv)
    self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
    self._compareBoth(x, y, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
  def testInt32Basic(self):
    """Full set of arithmetic ops (incl. mod) on int32 tensors."""
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
    self._compareBoth(x, y, np.add, math_ops.add)
    self._compareBoth(x, y, np.subtract, math_ops.subtract)
    self._compareBoth(x, y, np.multiply, math_ops.multiply)
    self._compareBoth(x, y, np.true_divide, math_ops.truediv)
    self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
    self._compareBoth(x, y, np.mod, math_ops.mod)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
    self._compareBoth(x, y, np.mod, _MOD)
    # _compareBoth tests on GPU only for floating point types, so test
    # _MOD for int32 on GPU by calling _compareGpu
    self._compareGpu(x, y, np.mod, _MOD)

  def testInt64Basic(self):
    """Arithmetic ops on int64 tensors with values beyond 32-bit range."""
    # NOTE(review): unlike testInt32Basic, np.add/math_ops.add is not
    # exercised here -- presumably an omission; confirm before adding it.
    x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
    self._compareBoth(x, y, np.subtract, math_ops.subtract)
    self._compareBoth(x, y, np.multiply, math_ops.multiply)
    self._compareBoth(x, y, np.true_divide, math_ops.truediv)
    self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
    self._compareBoth(x, y, np.mod, math_ops.mod)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
    self._compareBoth(x, y, np.mod, _MOD)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex64)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
def testComplex128Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex128)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex128)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
  def testStringComparison(self):
    """equal/not_equal work element-wise on string tensors (CPU only)."""
    x = np.array([["abc", "bh"], ["c", ""]])
    y = np.array([["abc", "bh"], ["def", "hi"]])
    with self.test_session(use_gpu=False) as sess:
      cmp_eq = math_ops.equal(x, y)
      cmp_not_eq = math_ops.not_equal(x, y)
      values = sess.run([cmp_eq, cmp_not_eq])
    # First row matches exactly; second row differs in both entries.
    self.assertAllEqual([[True, True], [False, False]], values[0])
    self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
x = np.array([["x_0_0", "x_0_1", "x_0_2"], ["x_1_0", "x_1_1", "x_1_2"],
["x_2_0", "x_2_1", "x_2_2"]],
dtype=np.object)
y = np.array([["y_0_0", "y_0_1", "y_0_2"], ["y_1_0", "y_1_1", "y_1_2"],
["y_2_0", "y_2_1", "y_2_2"]],
dtype=np.object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
w = np.array("w", dtype=np.object)
self._compareCpu(x, y, _ADD, _ADD)
self._compareCpu(x, z, _ADD, _ADD)
self._compareCpu(x, w, _ADD, _ADD)
self._compareCpu(z, w, _ADD, _ADD)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
if dtype in (np.complex64, np.complex128):
x = (1 + np.linspace(0, 2 + 3j, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 2 - 2j, np.prod(ys))).astype(dtype).reshape(ys)
else:
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float16, np.float32, np.float64):
# TODO(aselle): Make the test work for dtypes:
# (np.complex64, np.complex128).
if tf_func not in (_FLOORDIV, math_ops.floordiv):
if x.dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(x, y, np_func, tf_func, np.float)
self._compareGradientY(x, y, np_func, tf_func, np.float)
else:
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
  # TODO(josh11b,vrv): Refactor this to use parameterized tests.
  def _testBCastByFunc(self, funcs, xs, ys):
    """Runs each (np_func, tf_func) pair over all dtypes and both operand
    orders (xs broadcast against ys, and vice versa)."""
    dtypes = [
        np.float16,
        np.float32,
        np.float64,
        np.int32,
        np.int64,
        np.complex64,
        np.complex128,
    ]
    for dtype in dtypes:
      for (np_func, tf_func) in funcs:
        if (dtype in (np.complex64, np.complex128) and
            tf_func in (_FLOORDIV, math_ops.floordiv)):
          continue  # floordiv makes no sense for complex numbers
        self._compareBCast(xs, ys, dtype, np_func, tf_func)
        self._compareBCast(ys, xs, dtype, np_func, tf_func)
  # The four helpers below partition the binary ops into groups:
  # A=add, B=subtract/pow, C=multiply, D=division variants.
  def _testBCastA(self, xs, ys):
    funcs = [
        (np.add, math_ops.add),
        (np.add, _ADD),
    ]
    self._testBCastByFunc(funcs, xs, ys)

  def _testBCastB(self, xs, ys):
    funcs = [
        (np.subtract, math_ops.subtract),
        (np.subtract, _SUB),
        (np.power, math_ops.pow),
    ]
    self._testBCastByFunc(funcs, xs, ys)

  def _testBCastC(self, xs, ys):
    funcs = [
        (np.multiply, math_ops.multiply),
        (np.multiply, _MUL),
    ]
    self._testBCastByFunc(funcs, xs, ys)

  def _testBCastD(self, xs, ys):
    funcs = [
        (np.true_divide, math_ops.truediv),
        (np.floor_divide, math_ops.floordiv),
        (np.true_divide, _TRUEDIV),
        (np.floor_divide, _FLOORDIV),
    ]
    self._testBCastByFunc(funcs, xs, ys)
  # Each testBCast_{i}{A..D} method below pairs one broadcast shape case i
  # with the op groups defined above (A=add, B=sub/pow, C=mul, D=div).
  def testBCast_0A(self):
    self._testBCastA([1, 3, 2], [1])
  def testBCast_0B(self):
    self._testBCastB([1, 3, 2], [1])
  def testBCast_0C(self):
    self._testBCastC([1, 3, 2], [1])
  def testBCast_0D(self):
    self._testBCastD([1, 3, 2], [1])
  def testBCast_1A(self):
    self._testBCastA([1, 3, 2], [2])
  def testBCast_1B(self):
    self._testBCastB([1, 3, 2], [2])
  def testBCast_1C(self):
    self._testBCastC([1, 3, 2], [2])
  def testBCast_1D(self):
    self._testBCastD([1, 3, 2], [2])
  def testBCast_2A(self):
    self._testBCastA([1, 3, 2], [3, 2])
  def testBCast_2B(self):
    self._testBCastB([1, 3, 2], [3, 2])
  def testBCast_2C(self):
    self._testBCastC([1, 3, 2], [3, 2])
  def testBCast_2D(self):
    self._testBCastD([1, 3, 2], [3, 2])
  def testBCast_3A(self):
    self._testBCastA([1, 3, 2], [3, 1])
  def testBCast_3B(self):
    self._testBCastB([1, 3, 2], [3, 1])
  def testBCast_3C(self):
    self._testBCastC([1, 3, 2], [3, 1])
  def testBCast_3D(self):
    self._testBCastD([1, 3, 2], [3, 1])
  def testBCast_4A(self):
    self._testBCastA([1, 3, 2], [1, 3, 2])
  def testBCast_4B(self):
    self._testBCastB([1, 3, 2], [1, 3, 2])
  def testBCast_4C(self):
    self._testBCastC([1, 3, 2], [1, 3, 2])
  def testBCast_4D(self):
    self._testBCastD([1, 3, 2], [1, 3, 2])
  def testBCast_5A(self):
    self._testBCastA([1, 3, 2], [2, 3, 1])
  def testBCast_5B(self):
    self._testBCastB([1, 3, 2], [2, 3, 1])
  def testBCast_5C(self):
    self._testBCastC([1, 3, 2], [2, 3, 1])
  def testBCast_5D(self):
    self._testBCastD([1, 3, 2], [2, 3, 1])
  def testBCast_6A(self):
    self._testBCastA([1, 3, 2], [2, 1, 1])
  def testBCast_6B(self):
    self._testBCastB([1, 3, 2], [2, 1, 1])
  def testBCast_6C(self):
    self._testBCastC([1, 3, 2], [2, 1, 1])
  def testBCast_6D(self):
    self._testBCastD([1, 3, 2], [2, 1, 1])
  def testBCast_7A(self):
    self._testBCastA([1, 3, 2], [1, 3, 1])
  def testBCast_7B(self):
    self._testBCastB([1, 3, 2], [1, 3, 1])
  def testBCast_7C(self):
    self._testBCastC([1, 3, 2], [1, 3, 1])
  def testBCast_7D(self):
    self._testBCastD([1, 3, 2], [1, 3, 1])
  def testBCast_8A(self):
    self._testBCastA([2, 1, 5], [2, 3, 1])
  def testBCast_8B(self):
    self._testBCastB([2, 1, 5], [2, 3, 1])
  def testBCast_8C(self):
    self._testBCastC([2, 1, 5], [2, 3, 1])
  def testBCast_8D(self):
    self._testBCastD([2, 1, 5], [2, 3, 1])
  # Cases 9 and 10 include zero-sized dimensions.
  def testBCast_9A(self):
    self._testBCastA([2, 0, 5], [2, 0, 1])
  def testBCast_9B(self):
    self._testBCastB([2, 0, 5], [2, 0, 1])
  def testBCast_9C(self):
    self._testBCastC([2, 0, 5], [2, 0, 1])
  def testBCast_9D(self):
    self._testBCastD([2, 0, 5], [2, 0, 1])
  def testBCast_10A(self):
    self._testBCastA([2, 3, 0], [2, 3, 1])
  def testBCast_10B(self):
    self._testBCastB([2, 3, 0], [2, 3, 1])
  def testBCast_10C(self):
    self._testBCastC([2, 3, 0], [2, 3, 1])
  def testBCast_10D(self):
    self._testBCastD([2, 3, 0], [2, 3, 1])
  # NOTE(review): case 11 uses the same shapes as case 4 -- presumably an
  # accidental duplicate; confirm before removing either.
  def testBCast_11A(self):
    self._testBCastA([1, 3, 2], [1, 3, 2])
  def testBCast_11B(self):
    self._testBCastB([1, 3, 2], [1, 3, 2])
  def testBCast_11C(self):
    self._testBCastC([1, 3, 2], [1, 3, 2])
  def testBCast_11D(self):
    self._testBCastD([1, 3, 2], [1, 3, 2])
  def testBCast_12A(self):
    self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
  def testBCast_12B(self):
    self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
  def testBCast_12C(self):
    self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
  def testBCast_12D(self):
    self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
  def testBCast_13A(self):
    self._testBCastA([1, 3, 2, 1, 1], [1])
  def testBCast_13B(self):
    self._testBCastB([1, 3, 2, 1, 1], [1])
  def testBCast_13C(self):
    self._testBCastC([1, 3, 2, 1, 1], [1])
  def testBCast_13D(self):
    self._testBCastD([1, 3, 2, 1, 1], [1])
  def testBCast_14A(self):
    self._testBCastA([2, 3, 1, 1, 5], [1])
  def testBCast_14B(self):
    self._testBCastB([2, 3, 1, 1, 5], [1])
  def testBCast_14C(self):
    self._testBCastC([2, 3, 1, 1, 5], [1])
  def testBCast_14D(self):
    self._testBCastD([2, 3, 1, 1, 5], [1])
  def testBCast_15A(self):
    self._testBCastA([10, 3, 1, 2], [3, 1, 2])
  def testBCast_15B(self):
    self._testBCastB([10, 3, 1, 2], [3, 1, 2])
  def testBCast_15C(self):
    self._testBCastC([10, 3, 1, 2], [3, 1, 2])
  def testBCast_15D(self):
    self._testBCastD([10, 3, 1, 2], [3, 1, 2])
  def testMismatchedDimensions(self):
    """Incompatible (non-broadcastable) shapes raise at graph-build time."""
    for func in [
        math_ops.add, math_ops.subtract, math_ops.multiply, math_ops.div, _ADD,
        _SUB, _MUL, _TRUEDIV, _FLOORDIV
    ]:
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: "Dimensions must" in str(e)):
        # Shapes (3,) and (2, 2) cannot be broadcast together.
        func(
            ops.convert_to_tensor([10.0, 20.0, 30.0]),
            ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
  def testZeroPowGrad(self):
    """d/dy 0**y is exactly zero for y > 0."""
    with self.cached_session():
      for dtype in (np.float16, np.float32, np.float64, np.complex64,
                    np.complex128):
        x = constant_op.constant(0.0, dtype=dtype)
        y = constant_op.constant(2.0, dtype=dtype)
        z = math_ops.pow(x, y)
        error = gradient_checker.compute_gradient_error(y, [], z, [])
        self.assertEqual(error, 0)

  def testComplexPowGrad(self):
    """Gradient of pow w.r.t. the exponent is accurate for complex bases,
    including negative real bases (where the branch cut matters)."""
    with self.cached_session():
      for dtype in np.complex64, np.complex128:
        for base in 2.0, -2.0:
          x = constant_op.constant(base, dtype=dtype)
          y = constant_op.constant(2.0, dtype=dtype)
          z = math_ops.pow(x, y)
          error = gradient_checker.compute_gradient_error(y, [], z, [])
          self.assertLess(error, 2e-4)
  def testAtan2SpecialValues(self):
    """atan2 matches numpy on IEEE-754 special cases (signed zeros, infs)."""
    # zip(*) turns the (y, x) pairs into parallel y and x tuples.
    x1l, x2l = zip((+0.0, +0.0), (+0.0, -0.0), (-0.0, +0.0), (-0.0, -0.0),
                   (1.2345, float("inf")), (1.2345, -float("inf")),
                   (-4.321, float("inf")), (-4.125, -float("inf")),
                   (float("inf"), float("inf")), (float("inf"), -float("inf")),
                   (-float("inf"), float("inf")),
                   (-float("inf"), -float("inf")))
    for dtype in np.float32, np.float64:
      x1 = np.array(x1l).astype(dtype)
      x2 = np.array(x2l).astype(dtype)
      self._compareCpu(x1, x2, np.arctan2, math_ops.atan2)
      self._compareGpu(x1, x2, np.arctan2, math_ops.atan2)
def testPowNegativeExponent(self):
for dtype in [np.int32, np.int64]:
with self.test_session(use_gpu=False) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([-2, 3]).astype(dtype)
sess.run(math_ops.pow(x, y))
with self.test_session(use_gpu=False) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([2, -3]).astype(dtype)
sess.run(math_ops.pow(x, y))
with self.test_session(use_gpu=False) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = -3
sess.run(math_ops.pow(x, y))
class ComparisonOpTest(test.TestCase):
  def _compareScalar(self, func, x, y, dtype):
    """Applies comparison op `func` to scalars x, y cast to dtype; returns
    the single boolean result."""
    with self.test_session(force_gpu=test_util.is_gpu_available()):
      out = func(
          ops.convert_to_tensor(np.array([x]).astype(dtype)),
          ops.convert_to_tensor(np.array([y]).astype(dtype)))
      ret = self.evaluate(out)
    return ret[0]

  def testScalarCompareScalar(self):
    """All six comparisons agree with Python semantics on scalar inputs."""
    dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
    data = [-1, 0, 1]
    for t in dtypes:
      for x in data:
        for y in data:
          self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
          self.assertEqual(
              self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
          self.assertEqual(
              self._compareScalar(math_ops.greater, x, y, t), x > y)
          self.assertEqual(
              self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
          self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
          self.assertEqual(
              self._compareScalar(math_ops.not_equal, x, y, t), x != y)
    # Complex numbers are unordered: only (in)equality is defined.
    data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
    for t in [np.complex64, np.complex128]:
      for x in data:
        for y in data:
          self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
          self.assertEqual(
              self._compareScalar(math_ops.not_equal, x, y, t), x != y)

  def _compare(self, x, y, np_func, tf_func):
    """Checks tf_func(x, y) element-wise equals np_func(x, y)."""
    np_ans = np_func(x, y)
    with self.test_session(force_gpu=test_util.is_gpu_available()):
      out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
      tf_ans = self.evaluate(out)
    self.assertAllEqual(np_ans, tf_ans)
  def testTensorCompareTensor(self):
    """All comparisons on same-shape tensors across real dtypes; equality
    only for complex dtypes."""
    x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
    y = np.linspace(20, -10, 6).reshape(1, 3, 2)
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
      xt = x.astype(t)
      yt = y.astype(t)
      self._compare(xt, yt, np.less, math_ops.less)
      self._compare(xt, yt, np.less_equal, math_ops.less_equal)
      self._compare(xt, yt, np.greater, math_ops.greater)
      self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
      self._compare(xt, yt, np.equal, math_ops.equal)
      self._compare(xt, yt, np.not_equal, math_ops.not_equal)
    # Complex types do not support ordering but do support equality tests.
    for t in [np.complex64, np.complex128]:
      xt = x.astype(t)
      xt -= 1j * xt
      yt = y.astype(t)
      yt -= 1j * yt
      self._compare(xt, yt, np.equal, math_ops.equal)
      self._compare(xt, yt, np.not_equal, math_ops.not_equal)

  def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
    """Compares a comparison op on broadcasting shapes, in both operand
    orders."""
    x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
    y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
    if dtype in (np.complex64, np.complex128):
      # Give the inputs a nonzero imaginary part.
      x -= 1j * x
      y -= 1j * y
    self._compare(x, y, np_func, tf_func)
    self._compare(y, x, np_func, tf_func)
  def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
    """Runs one comparison op over a fixed matrix of broadcast shape pairs
    and dtypes (complex dtypes only when include_complex is set)."""
    shapes = [
        ([1, 3, 2], [1]),
        ([1, 3, 2], [2]),
        ([1, 3, 2], [3, 2]),
        ([1, 3, 2], [3, 1]),
        ([1, 3, 2], [1, 3, 2]),
        ([1, 3, 2], [2, 3, 1]),
        ([1, 3, 2], [2, 1, 1]),
        ([1, 3, 2], [1, 3, 1]),
        ([2, 1, 5], [2, 3, 1]),
        ([2, 0, 5], [2, 0, 1]),
        ([2, 3, 0], [2, 3, 1]),
    ]
    dtypes = [
        np.float16,
        np.float32,
        np.float64,
        np.int32,
        np.int64,
    ]
    if include_complex:
      dtypes.extend([np.complex64, np.complex128])
    for (xs, ys) in shapes:
      for dtype in dtypes:
        self._compareBCast(xs, ys, dtype, np_func, tf_func)

  # Ordering comparisons exclude complex dtypes; (in)equality includes them.
  def testBCastLess(self):
    self._testBCastByFunc(np.less, math_ops.less)
  def testBCastLessEqual(self):
    self._testBCastByFunc(np.less_equal, math_ops.less_equal)
  def testBCastGreater(self):
    self._testBCastByFunc(np.greater, math_ops.greater)
  def testBCastGreaterEqual(self):
    self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
  def testBCastEqual(self):
    self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)
  def testBCastNotEqual(self):
    self._testBCastByFunc(
        np.not_equal, math_ops.not_equal, include_complex=True)
  def testShapeMismatch(self):
    """Non-broadcastable shapes raise ValueError for every comparison op."""
    dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
    funcs = [
        math_ops.less, math_ops.less_equal, math_ops.greater,
        math_ops.greater_equal, math_ops.equal, math_ops.not_equal
    ]
    # (2, 5) vs (5, 2) cannot be broadcast together.
    x = np.arange(0, 10).reshape([2, 5])
    y = np.arange(0, 10).reshape([5, 2])
    for t in dtypes:
      for f in funcs:
        with self.assertRaisesWithPredicateMatch(
            ValueError, lambda e: "Dimensions must" in str(e)):
          f(x.astype(t), y.astype(t))
if __name__ == "__main__":
test.main()
| |
import json
from functools import partial
from time import time
from urllib.parse import urlencode
from geopy.exc import (
ConfigurationError,
GeocoderAuthenticationFailure,
GeocoderServiceError,
)
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder, _synchronized
from geopy.location import Location
from geopy.util import logger
__all__ = ("ArcGIS", )
DEFAULT_WKID = 4326
class ArcGIS(Geocoder):
    """Geocoder using the ESRI ArcGIS API.

    Documentation at:
        https://developers.arcgis.com/rest/geocode/api-reference/overview-world-geocoding-service.htm
    """

    # Status code ArcGIS embeds in the response body when a token expired.
    _TOKEN_EXPIRED = 498

    # Token-generation endpoint path (on ``auth_domain``).
    auth_path = '/sharing/generateToken'
    # Geocoding endpoint paths (on ``domain``).
    geocode_path = '/arcgis/rest/services/World/GeocodeServer/findAddressCandidates'
    reverse_path = '/arcgis/rest/services/World/GeocodeServer/reverseGeocode'
    def __init__(
            self,
            username=None,
            password=None,
            *,
            referer=None,
            token_lifetime=60,
            scheme=None,
            timeout=DEFAULT_SENTINEL,
            proxies=DEFAULT_SENTINEL,
            user_agent=None,
            ssl_context=DEFAULT_SENTINEL,
            adapter_factory=None,
            auth_domain='www.arcgis.com',
            domain='geocode.arcgis.com'
    ):
        """
        :param str username: ArcGIS username. Required if authenticated
            mode is desired.

        :param str password: ArcGIS password. Required if authenticated
            mode is desired.

        :param str referer: Required if authenticated mode is desired.
            `Referer` HTTP header to send with each request,
            e.g., ``'http://www.example.com'``. This is tied to an issued token,
            so fielding queries for multiple referrers should be handled by
            having multiple ArcGIS geocoder instances.

        :param int token_lifetime: Desired lifetime, in minutes, of an
            ArcGIS-issued token.

        :param str scheme:
            See :attr:`geopy.geocoders.options.default_scheme`.
            If authenticated mode is in use, it must be ``'https'``.

        :param int timeout:
            See :attr:`geopy.geocoders.options.default_timeout`.

        :param dict proxies:
            See :attr:`geopy.geocoders.options.default_proxies`.

        :param str user_agent:
            See :attr:`geopy.geocoders.options.default_user_agent`.

        :type ssl_context: :class:`ssl.SSLContext`
        :param ssl_context:
            See :attr:`geopy.geocoders.options.default_ssl_context`.

        :param callable adapter_factory:
            See :attr:`geopy.geocoders.options.default_adapter_factory`.

            .. versionadded:: 2.0

        :param str auth_domain: Domain where the target ArcGIS auth service
            is hosted. Used only in authenticated mode (i.e. username,
            password and referer are set).

        :param str domain: Domain where the target ArcGIS service
            is hosted.
        """
        super().__init__(
            scheme=scheme,
            timeout=timeout,
            proxies=proxies,
            user_agent=user_agent,
            ssl_context=ssl_context,
            adapter_factory=adapter_factory,
        )
        # Authenticated mode is all-or-nothing: providing any one credential
        # piece without the others is a configuration error.
        if username or password or referer:
            if not (username and password and referer):
                raise ConfigurationError(
                    "Authenticated mode requires username,"
                    " password, and referer"
                )
            if self.scheme != 'https':
                raise ConfigurationError(
                    "Authenticated mode requires scheme of 'https'"
                )
        self.username = username
        self.password = password
        self.referer = referer
        self.auth_domain = auth_domain.strip('/')
        self.auth_api = (
            '%s://%s%s' % (self.scheme, self.auth_domain, self.auth_path)
        )
        self.token_lifetime = token_lifetime * 60  # store in seconds
        self.domain = domain.strip('/')
        self.api = (
            '%s://%s%s' % (self.scheme, self.domain, self.geocode_path)
        )
        self.reverse_api = (
            '%s://%s%s' % (self.scheme, self.domain, self.reverse_path)
        )
        # Mutable state
        # token/token_expiry are lazily populated on the first authenticated
        # request; see _refresh_authentication_token.
        self.token = None
        self.token_expiry = None
def geocode(self, query, *, exactly_one=True, timeout=DEFAULT_SENTINEL,
out_fields=None):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param out_fields: A list of output fields to be returned in the
attributes field of the raw data. This can be either a python
list/tuple of fields or a comma-separated string. See
https://developers.arcgis.com/rest/geocode/api-reference/geocoding-service-output.htm
for a list of supported output fields. If you want to return all
supported output fields, set ``out_fields="*"``.
:type out_fields: str or iterable
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
params = {'singleLine': query, 'f': 'json'}
if exactly_one:
params['maxLocations'] = 1
if out_fields is not None:
if isinstance(out_fields, str):
params['outFields'] = out_fields
else:
params['outFields'] = ",".join(out_fields)
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
callback = partial(self._parse_geocode, exactly_one=exactly_one)
return self._authenticated_call_geocoder(url, callback, timeout=timeout)
def _parse_geocode(self, response, exactly_one):
if 'error' in response:
raise GeocoderServiceError(str(response['error']))
# Success; convert from the ArcGIS JSON format.
if not len(response['candidates']):
return None
geocoded = []
for resource in response['candidates']:
geometry = resource['location']
geocoded.append(
Location(
resource['address'], (geometry['y'], geometry['x']), resource
)
)
if exactly_one:
return geocoded[0]
return geocoded
def reverse(self, query, *, exactly_one=True, timeout=DEFAULT_SENTINEL,
distance=None):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param int distance: Distance from the query location, in meters,
within which to search. ArcGIS has a default of 100 meters, if not
specified.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
location = self._coerce_point_to_string(query, "%(lon)s,%(lat)s")
wkid = DEFAULT_WKID
params = {'location': location, 'f': 'json', 'outSR': wkid}
if distance is not None:
params['distance'] = distance
url = "?".join((self.reverse_api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
callback = partial(self._parse_reverse, exactly_one=exactly_one)
return self._authenticated_call_geocoder(url, callback, timeout=timeout)
    def _parse_reverse(self, response, exactly_one):
        """Convert an ArcGIS reverseGeocode JSON payload into Location(s).

        Returns None for an empty payload or a "no address found" error;
        raises GeocoderServiceError for any other service error.
        """
        if not len(response):
            return None
        if 'error' in response:
            # https://developers.arcgis.com/rest/geocode/api-reference/geocoding-service-output.htm
            if response['error']['code'] == 400:
                # 'details': ['Unable to find address for the specified location.']}
                try:
                    if 'Unable to find' in response['error']['details'][0]:
                        return None
                except (KeyError, IndexError):
                    # Malformed error payload; fall through and raise below.
                    pass
            raise GeocoderServiceError(str(response['error']))
        # NOTE(review): this format assumes all five keys are present in
        # response['address']; a partial address would raise KeyError --
        # confirm against the API's guarantees.
        address = (
            "%(Address)s, %(City)s, %(Region)s %(Postal)s,"
            " %(CountryCode)s" % response['address']
        )
        location = Location(
            address,
            (response['location']['y'], response['location']['x']),
            response['address']
        )
        if exactly_one:
            return location
        else:
            return [location]
    def _authenticated_call_geocoder(
            self, url, parse_callback, *, timeout=DEFAULT_SENTINEL
    ):
        """Issue a geocoder request, transparently handling token auth.

        In anonymous mode this is a plain request. In authenticated mode the
        current token is appended to the URL; if the service reports the
        token as expired (code 498), the token is refreshed once and the
        request is retried.
        """
        if not self.username:
            # Anonymous mode: no token handling needed.
            return self._call_geocoder(url, parse_callback, timeout=timeout)

        def query_callback():
            # First attempt with the token we currently hold; on expiry the
            # response is routed through maybe_reauthenticate_callback.
            call_url = "&".join((url, urlencode({"token": self.token})))
            headers = {"Referer": self.referer}
            return self._call_geocoder(
                call_url,
                partial(maybe_reauthenticate_callback, from_token=self.token),
                timeout=timeout,
                headers=headers,
            )

        def maybe_reauthenticate_callback(response, *, from_token):
            if "error" in response:
                if response["error"]["code"] == self._TOKEN_EXPIRED:
                    # Refresh and retry exactly once (query_retry_callback
                    # does not route through this callback again).
                    return self._refresh_authentication_token(
                        query_retry_callback, timeout=timeout, from_token=from_token
                    )
            return parse_callback(response)

        def query_retry_callback():
            # Second (and final) attempt with the freshly issued token.
            call_url = "&".join((url, urlencode({"token": self.token})))
            headers = {"Referer": self.referer}
            return self._call_geocoder(
                call_url, parse_callback, timeout=timeout, headers=headers
            )

        if self.token is None or int(time()) > self.token_expiry:
            # No token yet, or it has expired locally: refresh before calling.
            return self._refresh_authentication_token(
                query_callback, timeout=timeout, from_token=self.token
            )
        else:
            return query_callback()
@_synchronized
def _refresh_authentication_token(self, callback_success, *, timeout, from_token):
    """Fetch a fresh auth token, then invoke ``callback_success``.

    ``from_token`` is the token the caller observed as expired/missing.
    If it no longer matches ``self.token``, a concurrent call already
    refreshed the token and we proceed straight to the callback without
    another auth round-trip (the surrounding ``@_synchronized`` makes
    this check race-free).
    """
    if from_token != self.token:
        # Token has already been updated by a concurrent call.
        return callback_success()
    token_request_arguments = {
        'username': self.username,
        'password': self.password,
        'referer': self.referer,
        'expiration': self.token_lifetime,
        'f': 'json'
    }
    url = "?".join((self.auth_api, urlencode(token_request_arguments)))
    logger.debug(
        "%s._refresh_authentication_token: %s",
        self.__class__.__name__, url
    )
    def cb(response):
        # A response without a token means authentication failed.
        if "token" not in response:
            # BUGFIX: the two literals were previously concatenated with
            # no separator ("...auth request.Request URL...").
            raise GeocoderAuthenticationFailure(
                "Missing token in auth request. "
                "Request URL: %s; response JSON: %s" % (url, json.dumps(response))
            )
        self.token = response["token"]
        self.token_expiry = int(time()) + self.token_lifetime
        return callback_success()
    return self._call_geocoder(url, cb, timeout=timeout)
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime, timedelta
from sentry.models import (
GroupAssignee, GroupBookmark, GroupStatus, GroupSubscription, GroupTagValue
)
from sentry.search.base import ANY
from sentry.search.django.backend import DjangoSearchBackend
from sentry.testutils import TestCase
class DjangoSearchBackendTest(TestCase):
    """End-to-end tests for ``DjangoSearchBackend.query``: text search,
    sorting, status/tag/age/last-seen/date filters, assignment,
    bookmarks, subscriptions and cursor pagination."""
    def create_backend(self):
        # Factory hook so subclasses can exercise a different backend.
        return DjangoSearchBackend()
    def setUp(self):
        """Build the shared fixture: two groups in ``project1`` --
        group1 (unresolved, older, 2 events) and group2 (resolved,
        newer, bookmarked, assigned) -- plus mirrored tag rows and one
        active / one inactive subscription."""
        self.backend = self.create_backend()
        self.project1 = self.create_project(name='foo')
        self.project2 = self.create_project(name='bar')
        self.group1 = self.create_group(
            project=self.project1,
            checksum='a' * 32,
            message='foo',
            times_seen=5,
            status=GroupStatus.UNRESOLVED,
            last_seen=datetime(2013, 8, 13, 3, 8, 24, 880386),
            first_seen=datetime(2013, 7, 13, 3, 8, 24, 880386),
        )
        self.event1 = self.create_event(
            event_id='a' * 32,
            group=self.group1,
            datetime=datetime(2013, 7, 13, 3, 8, 24, 880386),
            tags={
                'server': 'example.com',
                'env': 'production',
            }
        )
        self.event3 = self.create_event(
            event_id='c' * 32,
            group=self.group1,
            datetime=datetime(2013, 8, 13, 3, 8, 24, 880386),
            tags={
                'server': 'example.com',
                'env': 'production',
            }
        )
        self.group2 = self.create_group(
            project=self.project1,
            checksum='b' * 32,
            message='bar',
            times_seen=10,
            status=GroupStatus.RESOLVED,
            last_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
            first_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
        )
        self.event2 = self.create_event(
            event_id='b' * 32,
            group=self.group2,
            datetime=datetime(2013, 7, 14, 3, 8, 24, 880386),
            tags={
                'server': 'example.com',
                'env': 'staging',
                'url': 'http://example.com',
            }
        )
        # Mirror each event's tags into GroupTagValue rows -- these are
        # what the Django search backend actually filters against.
        for key, value in self.event1.data['tags']:
            GroupTagValue.objects.create(
                project_id=self.group1.project_id,
                group_id=self.group1.id,
                key=key,
                value=value,
            )
        for key, value in self.event2.data['tags']:
            GroupTagValue.objects.create(
                project_id=self.group2.project_id,
                group_id=self.group2.id,
                key=key,
                value=value,
            )
        GroupBookmark.objects.create(
            user=self.user,
            group=self.group2,
            project=self.group2.project,
        )
        GroupAssignee.objects.create(
            user=self.user,
            group=self.group2,
            project=self.group2.project,
        )
        GroupSubscription.objects.create(
            user=self.user,
            group=self.group1,
            project=self.group1.project,
            is_active=True,
        )
        GroupSubscription.objects.create(
            user=self.user,
            group=self.group2,
            project=self.group2.project,
            is_active=False,
        )
    def test_query(self):
        # Full-text query matches on the group message.
        results = self.backend.query(self.project1, query='foo')
        assert len(results) == 1
        assert results[0] == self.group1
        results = self.backend.query(self.project1, query='bar')
        assert len(results) == 1
        assert results[0] == self.group2
    def test_sort(self):
        # 'date' = last_seen desc, 'new' = first_seen desc,
        # 'freq' = times_seen desc.
        results = self.backend.query(self.project1, sort_by='date')
        assert len(results) == 2
        assert results[0] == self.group1
        assert results[1] == self.group2
        results = self.backend.query(self.project1, sort_by='new')
        assert len(results) == 2
        assert results[0] == self.group2
        assert results[1] == self.group1
        results = self.backend.query(self.project1, sort_by='freq')
        assert len(results) == 2
        assert results[0] == self.group2
        assert results[1] == self.group1
    def test_status(self):
        results = self.backend.query(self.project1, status=GroupStatus.UNRESOLVED)
        assert len(results) == 1
        assert results[0] == self.group1
        results = self.backend.query(self.project1, status=GroupStatus.RESOLVED)
        assert len(results) == 1
        assert results[0] == self.group2
    def test_tags(self):
        # Tag filters AND together; ANY matches any non-empty value.
        results = self.backend.query(self.project1, tags={'env': 'staging'})
        assert len(results) == 1
        assert results[0] == self.group2
        results = self.backend.query(self.project1, tags={'env': 'example.com'})
        assert len(results) == 0
        results = self.backend.query(self.project1, tags={'env': ANY})
        assert len(results) == 2
        results = self.backend.query(self.project1, tags={'env': 'staging', 'server': 'example.com'})
        assert len(results) == 1
        assert results[0] == self.group2
        results = self.backend.query(self.project1, tags={'env': 'staging', 'server': ANY})
        assert len(results) == 1
        assert results[0] == self.group2
        results = self.backend.query(self.project1, tags={'env': 'staging', 'server': 'bar.example.com'})
        assert len(results) == 0
    def test_bookmarked_by(self):
        results = self.backend.query(self.project1, bookmarked_by=self.user)
        assert len(results) == 1
        assert results[0] == self.group2
    def test_project(self):
        # project2 has no groups.
        results = self.backend.query(self.project2)
        assert len(results) == 0
    def test_pagination(self):
        # Walk the result set one item at a time via the 'next' cursor.
        results = self.backend.query(self.project1, limit=1, sort_by='date')
        assert len(results) == 1
        assert results[0] == self.group1
        results = self.backend.query(self.project1, cursor=results.next, limit=1, sort_by='date')
        assert len(results) == 1
        assert results[0] == self.group2
        results = self.backend.query(self.project1, cursor=results.next, limit=1, sort_by='date')
        assert len(results) == 0
    def test_age_filter(self):
        # age_* filters compare against Group.first_seen.
        results = self.backend.query(
            self.project1,
            age_from=self.group2.first_seen,
        )
        assert len(results) == 1
        assert results[0] == self.group2
        results = self.backend.query(
            self.project1,
            age_to=self.group1.first_seen + timedelta(minutes=1),
        )
        assert len(results) == 1
        assert results[0] == self.group1
        results = self.backend.query(
            self.project1,
            age_from=self.group1.first_seen,
            age_to=self.group1.first_seen + timedelta(minutes=1),
        )
        assert len(results) == 1
        assert results[0] == self.group1
    def test_last_seen_filter(self):
        # last_seen_* filters compare against Group.last_seen.
        results = self.backend.query(
            self.project1,
            last_seen_from=self.group1.last_seen,
        )
        assert len(results) == 1
        assert results[0] == self.group1
        results = self.backend.query(
            self.project1,
            last_seen_to=self.group2.last_seen + timedelta(minutes=1),
        )
        assert len(results) == 1
        assert results[0] == self.group2
        results = self.backend.query(
            self.project1,
            last_seen_from=self.group1.last_seen,
            last_seen_to=self.group1.last_seen + timedelta(minutes=1),
        )
        assert len(results) == 1
        assert results[0] == self.group1
    def test_date_filter(self):
        # date_* filters match groups by their events' datetimes.
        results = self.backend.query(
            self.project1,
            date_from=self.event2.datetime,
        )
        assert len(results) == 2
        assert results[0] == self.group1
        assert results[1] == self.group2
        results = self.backend.query(
            self.project1,
            date_to=self.event1.datetime + timedelta(minutes=1),
        )
        assert len(results) == 1
        assert results[0] == self.group1
        results = self.backend.query(
            self.project1,
            date_from=self.event1.datetime,
            date_to=self.event2.datetime + timedelta(minutes=1),
        )
        assert len(results) == 2
        assert results[0] == self.group1
        assert results[1] == self.group2
    def test_unassigned(self):
        results = self.backend.query(self.project1, unassigned=True)
        assert len(results) == 1
        assert results[0] == self.group1
        results = self.backend.query(self.project1, unassigned=False)
        assert len(results) == 1
        assert results[0] == self.group2
    def test_assigned_to(self):
        results = self.backend.query(self.project1, assigned_to=self.user)
        assert len(results) == 1
        assert results[0] == self.group2
    def test_subscribed_by(self):
        # Only the *active* subscription (group1) should match.
        results = self.backend.query(
            self.group1.project,
            subscribed_by=self.user,
        )
        assert len(results) == 1
        assert results[0] == self.group1
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseSliceOpTest(test.TestCase):
  """Tests for `sparse_ops.sparse_slice`.

  Covers row/column slicing, slices whose start+size overrun the input
  (the op clips to the input's dense shape), and gradient checks.
  """

  def _SparseTensor_4x6(self, val_dtype=np.int64):
    # Dense view of the fixture:
    # [0 | |2 | |4 |5 ]
    # [ |11| |13|14| ]
    # [20| | |23| |25]
    # [30| |32|33| |35]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5]]).astype(np.int64)
    val = np.array([0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33,
                    35]).astype(val_dtype)
    shape = np.array([4, 6]).astype(np.int64)
    return sparse_tensor.SparseTensor(ind, val, shape)

  def _SparseTensor_5x7(self):
    # Dense view of the fixture:
    # [0 | |2 | |4 |5 | ]
    # [ |11| |13|14| |16]
    # [20| | |23| |25| ]
    # [30| |32|33| |35| ]
    # [ |41| | |44| |46]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
    val = np.array(
        [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
         46]).astype(np.int64)
    shape = np.array([5, 7]).astype(np.int64)
    return sparse_tensor.SparseTensor(ind, val, shape)

  def _SparseTensorValue_3x4x2(self):
    # slice(:,:, 0)
    # ['a0'| |'b0'| ]
    # [ |'c0'| |'d0']
    # [ | |'e0'| ]
    # slice(:,:, 1)
    # ['a1'| |'b1'| ]
    # [ |'c1'| |'d1']
    # [ | |'e1'| ]
    ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
                    [1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
                    [2, 2, 1]]).astype(np.int64)
    val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
    shape = np.array([3, 4, 2]).astype(np.int64)
    return sparse_tensor.SparseTensorValue(ind, val, shape)

  def _SparseTensor_3x4x2(self):
    return sparse_tensor.SparseTensor.from_value(
        self._SparseTensorValue_3x4x2())

  def testSliceMatrixRows(self):
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 6])
      # Size [3, 7] overruns the 4x6 input; the result clips to [2, 6].
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [3, 7])
      self.assertAllEqual(
          sp_tensor0.indices.eval(),
          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4]])
      self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5, 11, 13, 14])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 6])
      self.assertAllEqual(
          sp_tensor1.indices.eval(),
          [[0, 0], [0, 3], [0, 5], [1, 0], [1, 2], [1, 3], [1, 5]])
      self.assertAllEqual(sp_tensor1.values.eval(),
                          [20, 23, 25, 30, 32, 33, 35])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 6])

  def testSliceMatrixUnevenCols(self):
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x7()
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 3])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 3], [5, 2])
      sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 5], [5, 2])
      self.assertAllEqual(
          sp_tensor0.indices.eval(),
          [[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2], [4, 1]])
      self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 11, 20, 30, 32, 41])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 3])
      self.assertAllEqual(sp_tensor1.indices.eval(),
                          [[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensor1.values.eval(), [4, 13, 14, 23, 33, 44])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensor2.indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensor2.values.eval(), [5, 16, 25, 35, 46])
      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])
      # Slices of width 2 across all 7 columns; the last slice clips to
      # a single remaining column.
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 2])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
      sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 2])
      sp_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 6], [5, 2])
      self.assertAllEqual(sp_tensor0.indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensor0.values.eval(), [0, 11, 20, 30, 41])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensor1.indices.eval(),
                          [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
      self.assertAllEqual(sp_tensor1.values.eval(), [2, 13, 23, 32, 33])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensor2.indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
      self.assertAllEqual(sp_tensor2.values.eval(), [4, 5, 14, 25, 35, 44])
      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensor3.indices.eval(), [[1, 0], [4, 0]])
      self.assertAllEqual(sp_tensor3.values.eval(), [16, 46])
      self.assertAllEqual(sp_tensor3.dense_shape.eval(), [5, 1])

  def testSliceMatrixUnevenRows(self):
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x7()
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [3, 7])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [3, 0], [3, 7])
      self.assertAllEqual(sp_tensor0.indices.eval(),
                          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
                           [1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
      self.assertAllEqual(sp_tensor0.values.eval(),
                          [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [3, 7])
      self.assertAllEqual(
          sp_tensor1.indices.eval(),
          [[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4], [1, 6]])
      self.assertAllEqual(sp_tensor1.values.eval(),
                          [30, 32, 33, 35, 41, 44, 46])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 7])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [2, 7])
      sp_tensor2 = sparse_ops.sparse_slice(sp_input, [4, 0], [2, 7])
      self.assertAllEqual(
          sp_tensor0.indices.eval(),
          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4], [1, 6]])
      self.assertAllEqual(sp_tensor0.values.eval(),
                          [0, 2, 4, 5, 11, 13, 14, 16])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 7])
      self.assertAllEqual(sp_tensor1.values.eval(),
                          [20, 23, 25, 30, 32, 33, 35])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])
      self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 1], [0, 4], [0, 6]])
      self.assertAllEqual(sp_tensor2.values.eval(), [41, 44, 46])
      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 7])
      # BUGFIX: removed a stray no-op `return` that previously ended
      # this method here.

  def testSliceAllRows(self):
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [1, 6])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [1, 0], [1, 6])
      sp_tensor2 = sparse_ops.sparse_slice(sp_input, [2, 0], [1, 7])
      sp_tensor3 = sparse_ops.sparse_slice(sp_input, [3, 0], [2, 7])
      self.assertAllEqual(sp_tensor0.indices.eval(),
                          [[0, 0], [0, 2], [0, 4], [0, 5]])
      self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensor1.indices.eval(), [[0, 1], [0, 3], [0, 4]])
      self.assertAllEqual(sp_tensor1.values.eval(), [11, 13, 14])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 0], [0, 3], [0, 5]])
      self.assertAllEqual(sp_tensor2.values.eval(), [20, 23, 25])
      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensor3.indices.eval(),
                          [[0, 0], [0, 2], [0, 3], [0, 5]])
      self.assertAllEqual(sp_tensor3.values.eval(), [30, 32, 33, 35])
      self.assertAllEqual(sp_tensor3.dense_shape.eval(), [1, 6])

  def testSliceColumns(self):
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
      sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 2])
      sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
      sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 3])
      self.assertAllEqual(sparse_tensor0.indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor0.values.eval(), [0, 11, 20, 30])
      self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 2])
      self.assertAllEqual(sparse_tensor1.indices.eval(),
                          [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
      self.assertAllEqual(sparse_tensor1.values.eval(), [2, 13, 23, 32, 33])
      self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 2])
      self.assertAllEqual(sparse_tensor2.indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
      self.assertAllEqual(sparse_tensor2.values.eval(), [4, 5, 14, 25, 35])
      self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 2])

  def testSliceAllColumns(self):
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
      sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 1])
      sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 1], [4, 1])
      sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 2], [4, 1])
      sparse_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 3], [4, 1])
      sparse_tensor4 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 1])
      sparse_tensor5 = sparse_ops.sparse_slice(sp_input, [0, 5], [6, 3])
      self.assertAllEqual(sparse_tensor0.indices.eval(),
                          [[0, 0], [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor0.values.eval(), [0, 20, 30])
      self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor1.indices.eval(), [[1, 0]])
      self.assertAllEqual(sparse_tensor1.values.eval(), [11])
      self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor2.indices.eval(), [[0, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor2.values.eval(), [2, 32])
      self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor3.indices.eval(),
                          [[1, 0], [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor3.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor3.values.eval(), [13, 23, 33])
      self.assertAllEqual(sparse_tensor4.indices.eval(), [[0, 0], [1, 0]])
      self.assertAllEqual(sparse_tensor4.values.eval(), [4, 14])
      self.assertAllEqual(sparse_tensor4.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor5.indices.eval(),
                          [[0, 0], [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor5.values.eval(), [5, 25, 35])
      self.assertAllEqual(sparse_tensor5.dense_shape.eval(), [4, 1])

  def testGradients(self):
    # Numeric gradient check for each representative column slice.
    sp_input = self._SparseTensor_4x6(val_dtype=np.float32)
    start_and_size = [([0, 0], [4, 2]),
                      ([0, 2], [5, 2]),
                      ([0, 4], [5, 3])]
    with self.test_session(use_gpu=False):
      for start, size in start_and_size:
        sp_output = sparse_ops.sparse_slice(sp_input, start, size)
        nnz_in = len(sp_input.values.eval())
        nnz_out = len(sp_output.values.eval())
        err = gradient_checker.compute_gradient_error(
            [sp_input.values], [(nnz_in,)], sp_output.values, (nnz_out,))
        self.assertLess(err, 1e-3)


if __name__ == '__main__':
  test.main()
| |
from django import forms
from django.core.exceptions import PermissionDenied
from django.db import router
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.template.response import TemplateResponse
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ungettext
from django.utils.text import capfirst
from xadmin.sites import site
from xadmin.util import model_format_dict, get_deleted_objects, model_ngettext
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.views.base import filter_hook, ModelAdminView
# Name of the POST field carrying the selected row primary keys.
ACTION_CHECKBOX_NAME = '_selected_action'
# Shared widget instance; the check_test lambda renders it unchecked.
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
def action_checkbox(obj):
    # Render the per-row selection checkbox for the change-list table.
    return checkbox.render(ACTION_CHECKBOX_NAME, force_unicode(obj.pk))
# Column header is the "select all" toggle checkbox.
action_checkbox.short_description = mark_safe(
    '<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
# Exclude the checkbox column from exports and from the column chooser.
action_checkbox.allow_export = False
action_checkbox.is_column = False
class BaseActionView(ModelAdminView):
    """Base class for list actions shown in the admin action dropdown."""

    # Identifier, label, icon and required model permission for the action.
    action_name = None
    description = None
    icon = 'fa fa-tasks'
    model_perm = 'change'

    @classmethod
    def has_perm(cls, list_view):
        """Whether the current user may run this action on ``list_view``."""
        perms = list_view.get_model_perms()
        return perms[cls.model_perm]

    def init_action(self, list_view):
        """Bind this action instance to the list view that triggered it."""
        self.list_view = list_view
        self.admin_site = list_view.admin_site

    @filter_hook
    def do_action(self, queryset):
        """Run the action against ``queryset``; subclasses override this."""
        pass
class DeleteSelectedAction(BaseActionView):
    """Bulk-delete action: shows a confirmation page listing the cascade
    of related objects, then deletes on confirmed POST."""
    action_name = "delete_selected"
    description = _(u'Delete selected %(verbose_name_plural)s')
    delete_confirmation_template = None
    delete_selected_confirmation_template = None
    model_perm = 'delete'
    icon = 'fa fa-times'
    @filter_hook
    def delete_models(self, queryset):
        """Delete ``queryset`` and flash a success message with the count."""
        n = queryset.count()
        if n:
            queryset.delete()
            self.message_user(_("Successfully deleted %(count)d %(items)s.") % {
                "count": n, "items": model_ngettext(self.opts, n)
            }, 'success')
    @filter_hook
    def do_action(self, queryset):
        # Check that the user has delete permission for the actual model
        if not self.has_delete_permission():
            raise PermissionDenied
        using = router.db_for_write(self.model)
        # Populate deletable_objects, a data structure of all related objects that
        # will also be deleted.
        deletable_objects, perms_needed, protected = get_deleted_objects(
            queryset, self.opts, self.user, self.admin_site, using)
        # The user has already confirmed the deletion.
        # Do the deletion and return a None to display the change list view again.
        if self.request.POST.get('post'):
            # perms_needed lists related models the user may not delete.
            if perms_needed:
                raise PermissionDenied
            self.delete_models(queryset)
            # Return None to display the change list page again.
            return None
        if len(queryset) == 1:
            objects_name = force_unicode(self.opts.verbose_name)
        else:
            objects_name = force_unicode(self.opts.verbose_name_plural)
        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": objects_name}
        else:
            title = _("Are you sure?")
        context = self.get_context()
        context.update({
            "title": title,
            "objects_name": objects_name,
            "deletable_objects": [deletable_objects],
            'queryset': queryset,
            "perms_lacking": perms_needed,
            "protected": protected,
            "opts": self.opts,
            "app_label": self.app_label,
            'action_checkbox_name': ACTION_CHECKBOX_NAME,
        })
        # Display the confirmation page
        return TemplateResponse(self.request, self.delete_selected_confirmation_template or
                                self.get_template_list('views/model_delete_selected_confirm.html'), context, current_app=self.admin_site.name)
class ActionPlugin(BaseAdminPlugin):
    """List-view plugin that collects available actions, renders the
    selection checkboxes/dropdown, and dispatches POSTed actions."""
    # Actions
    actions = []
    actions_selection_counter = True
    global_actions = [DeleteSelectedAction]
    def init_request(self, *args, **kwargs):
        # Plugin is only activated when at least one action is available.
        self.actions = self.get_actions()
        return bool(self.actions)
    def get_list_display(self, list_display):
        # Prepend the selection-checkbox column.
        if self.actions:
            list_display.insert(0, 'action_checkbox')
            self.admin_view.action_checkbox = action_checkbox
        return list_display
    def get_list_display_links(self, list_display_links):
        if self.actions:
            # The checkbox column must not be the row link; fall back to
            # the first real column.
            if len(list_display_links) == 1 and list_display_links[0] == 'action_checkbox':
                return list(self.admin_view.list_display[1:2])
        return list_display_links
    def get_context(self, context):
        # Inject selection-counter strings and the action dropdown choices.
        if self.actions and self.admin_view.result_count:
            av = self.admin_view
            selection_note_all = ungettext('%(total_count)s selected',
                                           'All %(total_count)s selected', av.result_count)
            new_context = {
                'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(av.result_list)},
                'selection_note_all': selection_note_all % {'total_count': av.result_count},
                'action_choices': self.get_action_choices(),
                'actions_selection_counter': self.actions_selection_counter,
            }
            context.update(new_context)
        return context
    def post_response(self, response, *args, **kwargs):
        """Intercept the change-list POST and run the selected action."""
        request = self.admin_view.request
        av = self.admin_view
        # Actions with no confirmation
        if self.actions and 'action' in request.POST:
            action = request.POST['action']
            if action not in self.actions:
                msg = _("Items must be selected in order to perform "
                        "actions on them. No items have been changed.")
                av.message_user(msg)
            else:
                ac, name, description, icon = self.actions[action]
                # select_across means "all pages", not just the checked rows.
                select_across = request.POST.get('select_across', False) == '1'
                selected = request.POST.getlist(ACTION_CHECKBOX_NAME)
                if not selected and not select_across:
                    # Reminder that something needs to be selected or nothing will happen
                    msg = _("Items must be selected in order to perform "
                            "actions on them. No items have been changed.")
                    av.message_user(msg)
                else:
                    queryset = av.list_queryset._clone()
                    if not select_across:
                        # Perform the action only on the selected objects
                        queryset = av.list_queryset.filter(pk__in=selected)
                    response = self.response_action(ac, queryset)
                    # Actions may return an HttpResponse, which will be used as the
                    # response from the POST. If not, we'll be a good little HTTP
                    # citizen and redirect back to the changelist page.
                    if isinstance(response, HttpResponse):
                        return response
                    else:
                        return HttpResponseRedirect(request.get_full_path())
        return response
    def response_action(self, ac, queryset):
        # Class-based actions get instantiated as views; plain callables
        # are invoked directly.
        if isinstance(ac, type) and issubclass(ac, BaseActionView):
            action_view = self.get_model_view(ac, self.admin_view.model)
            action_view.init_action(self.admin_view)
            return action_view.do_action(queryset)
        else:
            return ac(self.admin_view, self.request, queryset)
    def get_actions(self):
        """Collect global actions plus any declared on the view's MRO,
        as a SortedDict keyed by action name."""
        if self.actions is None:
            return SortedDict()
        actions = [self.get_action(action) for action in self.global_actions]
        for klass in self.admin_view.__class__.mro()[::-1]:
            class_actions = getattr(klass, 'actions', [])
            if not class_actions:
                continue
            actions.extend(
                [self.get_action(action) for action in class_actions])
        # get_action might have returned None, so filter any of those out.
        actions = filter(None, actions)
        # Convert the actions into a SortedDict keyed by name.
        actions = SortedDict([
            (name, (ac, name, desc, icon))
            for ac, name, desc, icon in actions
        ])
        return actions
    def get_action_choices(self):
        """
        Return a list of choices for use in a form object. Each choice is a
        tuple (name, description, icon).
        """
        choices = []
        for ac, name, description, icon in self.actions.itervalues():
            choice = (name, description % model_format_dict(self.opts), icon)
            choices.append(choice)
        return choices
    def get_action(self, action):
        """Normalize an action (class, callable, or method name) into a
        ``(callable, name, description, icon)`` tuple, or ``None``."""
        if isinstance(action, type) and issubclass(action, BaseActionView):
            if not action.has_perm(self.admin_view):
                return None
            return action, getattr(action, 'action_name'), getattr(action, 'description'), getattr(action, 'icon')
        elif callable(action):
            func = action
            action = action.__name__
        elif hasattr(self.admin_view.__class__, action):
            func = getattr(self.admin_view.__class__, action)
        else:
            return None
        if hasattr(func, 'short_description'):
            description = func.short_description
        else:
            description = capfirst(action.replace('_', ' '))
        # NOTE(review): default icon here is 'tasks' while BaseActionView
        # uses 'fa fa-tasks' -- looks inconsistent, confirm intended.
        return func, action, description, getattr(func, 'icon', 'tasks')
    # View Methods
    def result_header(self, item, field_name, row):
        # Tag the header cell of the checkbox column for CSS/JS hooks.
        if item.attr and field_name == 'action_checkbox':
            item.classes.append("action-checkbox-column")
        return item
    def result_item(self, item, obj, field_name, row):
        # Tag each body cell of the checkbox column.
        if item.field is None and field_name == u'action_checkbox':
            item.classes.append("action-checkbox")
        return item
    # Media
    def get_media(self, media):
        if self.actions and self.admin_view.result_count:
            media = media + self.vendor('xadmin.plugin.actions.js', 'xadmin.plugins.css')
        return media
    # Block Views
    def block_results_bottom(self, context, nodes):
        # Render the action dropdown below the result list.
        if self.actions and self.admin_view.result_count:
            nodes.append(loader.render_to_string('xadmin/blocks/model_list.results_bottom.actions.html', context_instance=context))
site.register_plugin(ActionPlugin, ListAdminView)
| |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parent client for calling the Google Cloud Bigtable API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`~google.cloud.bigtable.client.Client` owns an
:class:`~google.cloud.bigtable.instance.Instance`
* an :class:`~google.cloud.bigtable.instance.Instance` owns a
:class:`~google.cloud.bigtable.table.Table`
* a :class:`~google.cloud.bigtable.table.Table` owns a
:class:`~.column_family.ColumnFamily`
* a :class:`~google.cloud.bigtable.table.Table` owns a
:class:`~google.cloud.bigtable.row.Row` (and all the cells in the row)
"""
import os
import warnings
import grpc
from google.api_core.gapic_v1 import client_info
from google.cloud import bigtable_v2
from google.cloud import bigtable_admin_v2
from google.cloud.bigtable import __version__
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.cluster import Cluster
from google.cloud.client import ClientWithProject
from google.cloud.bigtable_admin_v2 import enums
from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE
from google.cloud.environment_vars import BIGTABLE_EMULATOR
# Convenience aliases for the instance-type enum values.
INSTANCE_TYPE_PRODUCTION = enums.Instance.Type.PRODUCTION
INSTANCE_TYPE_DEVELOPMENT = enums.Instance.Type.DEVELOPMENT
INSTANCE_TYPE_UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED
# Client metadata sent with every gapic request.
_CLIENT_INFO = client_info.ClientInfo(
    client_library_version=__version__)
# NOTE(review): a Spanner scope in the Bigtable client looks like a
# copy-paste leftover and is unused below -- confirm before removing.
SPANNER_ADMIN_SCOPE = 'https://www.googleapis.com/auth/spanner.admin'
ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin'
"""Scope for interacting with the Cluster Admin and Table Admin APIs."""
DATA_SCOPE = 'https://www.googleapis.com/auth/bigtable.data'
"""Scope for reading and writing table data."""
READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly'
"""Scope for reading table data."""
def _create_gapic_client(client_class):
    """Return a factory building ``client_class`` for a given client.

    The returned callable takes a :class:`Client` and instantiates
    ``client_class`` against the emulator channel when the Bigtable
    emulator is configured, or with the client's real credentials
    otherwise.
    """
    def inner(self):
        use_emulator = self._emulator_host is not None
        if use_emulator:
            return client_class(
                channel=self._emulator_channel, client_info=_CLIENT_INFO)
        return client_class(
            credentials=self._credentials, client_info=_CLIENT_INFO)
    return inner
class Client(ClientWithProject):
    """Client for interacting with Google Cloud Bigtable API.

    .. note::

        Since the Cloud Bigtable API requires the gRPC transport, no
        ``_http`` argument is accepted by this class.

    :type project: :class:`str` or :func:`unicode <unicode>`
    :param project: (Optional) The ID of the project which owns the
                    instances, tables and data. If not provided, will
                    attempt to determine from the environment.

    :type credentials: :class:`~google.auth.credentials.Credentials`
    :param credentials: (Optional) The OAuth2 Credentials to use for this
                        client. If not passed, falls back to the default
                        inferred from the environment.

    :type read_only: bool
    :param read_only: (Optional) Boolean indicating if the data scope should be
                      for reading only (or for writing as well). Defaults to
                      :data:`False`.

    :type admin: bool
    :param admin: (Optional) Boolean indicating if the client will be used to
                  interact with the Instance Admin or Table Admin APIs. This
                  requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`.

    :type channel: :instance: grpc.Channel
    :param channel (grpc.Channel): (Optional) DEPRECATED:
            A ``Channel`` instance through which to make calls.
            This argument is mutually exclusive with ``credentials``;
            providing both will raise an exception. No longer used.

    :raises: :class:`ValueError <exceptions.ValueError>` if both ``read_only``
             and ``admin`` are :data:`True`
    """

    # GAPIC stubs are created lazily by the properties below.
    _table_data_client = None
    _table_admin_client = None
    _instance_admin_client = None

    def __init__(self, project=None, credentials=None,
                 read_only=False, admin=False, channel=None):
        if read_only and admin:
            # NOTE: trailing space restored so the two literals join as
            # "...perform administrative actions." (previously rendered
            # as "performadministrative").
            raise ValueError('A read-only client cannot also perform '
                             'administrative actions.')

        # NOTE: We set the scopes **before** calling the parent constructor.
        #       It **may** use those scopes in ``with_scopes_if_required``.
        self._read_only = bool(read_only)
        self._admin = bool(admin)
        self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
        self._emulator_channel = None

        if self._emulator_host is not None:
            self._emulator_channel = grpc.insecure_channel(self._emulator_host)

        if channel is not None:
            warnings.warn(
                "'channel' is deprecated and no longer used.",
                DeprecationWarning, stacklevel=2)

        # Retained only for backward compatibility; never used.
        self._channel = channel
        self.SCOPE = self._get_scopes()
        super(Client, self).__init__(project=project, credentials=credentials)

    def _get_scopes(self):
        """Get the scopes corresponding to admin / read-only state.

        Returns:
            Tuple[str, ...]: The tuple of scopes.
        """
        if self._read_only:
            scopes = (READ_ONLY_SCOPE,)
        else:
            scopes = (DATA_SCOPE,)

        if self._admin:
            scopes += (ADMIN_SCOPE,)

        return scopes

    @property
    def project_path(self):
        """Project name to be used with Instance Admin API.

        .. note::

            This property will not change if ``project`` does not, but the
            return value is not cached.

        The project name is of the form ``"projects/{project}"``.

        :rtype: str
        :returns: Return a fully-qualified project string.
        """
        return self.instance_admin_client.project_path(self.project)

    @property
    def table_data_client(self):
        """Getter for the gRPC stub used for the Bigtable data API.

        The stub is created on first access and cached.

        :rtype: :class:`.bigtable_v2.BigtableClient`
        :returns: A BigtableClient object.
        """
        if self._table_data_client is None:
            self._table_data_client = _create_gapic_client(
                bigtable_v2.BigtableClient)(self)
        return self._table_data_client

    @property
    def table_admin_client(self):
        """Getter for the gRPC stub used for the Table Admin API.

        The stub is created on first access and cached.

        :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin`
        :returns: A BigtableTableAdmin instance.
        :raises: :class:`ValueError <exceptions.ValueError>` if the current
                 client is not an admin client or if it has not been
                 :meth:`start`-ed.
        """
        if self._table_admin_client is None:
            if not self._admin:
                raise ValueError('Client is not an admin client.')
            self._table_admin_client = _create_gapic_client(
                bigtable_admin_v2.BigtableTableAdminClient)(self)
        return self._table_admin_client

    @property
    def instance_admin_client(self):
        """Getter for the gRPC stub used for the Instance Admin API.

        The stub is created on first access and cached.

        :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin`
        :returns: A BigtableInstanceAdmin instance.
        :raises: :class:`ValueError <exceptions.ValueError>` if the current
                 client is not an admin client or if it has not been
                 :meth:`start`-ed.
        """
        if self._instance_admin_client is None:
            if not self._admin:
                raise ValueError('Client is not an admin client.')
            self._instance_admin_client = _create_gapic_client(
                bigtable_admin_v2.BigtableInstanceAdminClient)(self)
        return self._instance_admin_client

    def instance(self, instance_id, display_name=None,
                 instance_type=None, labels=None):
        """Factory to create a instance associated with this client.

        For example:

        .. literalinclude:: snippets.py
            :start-after: [START bigtable_create_prod_instance]
            :end-before: [END bigtable_create_prod_instance]

        :type instance_id: str
        :param instance_id: The ID of the instance.

        :type display_name: str
        :param display_name: (Optional) The display name for the instance in
                             the Cloud Console UI. (Must be between 4 and 30
                             characters.) If this value is not set in the
                             constructor, will fall back to the instance ID.

        :type instance_type: int
        :param instance_type: (Optional) The type of the instance.
                              Possible values are represented
                              by the following constants:
                              :data:`google.cloud.bigtable.enums.InstanceType.PRODUCTION`.
                              :data:`google.cloud.bigtable.enums.InstanceType.DEVELOPMENT`,
                              Defaults to
                              :data:`google.cloud.bigtable.enums.InstanceType.UNSPECIFIED`.

        :type labels: dict
        :param labels: (Optional) Labels are a flexible and lightweight
                       mechanism for organizing cloud resources into groups
                       that reflect a customer's organizational needs and
                       deployment strategies. They can be used to filter
                       resources and aggregate metrics. Label keys must be
                       between 1 and 63 characters long. Maximum 64 labels can
                       be associated with a given resource. Label values must
                       be between 0 and 63 characters long. Keys and values
                       must both be under 128 bytes.

        :rtype: :class:`~google.cloud.bigtable.instance.Instance`
        :returns: an instance owned by this client.
        """
        return Instance(instance_id, self, display_name=display_name,
                        instance_type=instance_type, labels=labels)

    def list_instances(self):
        """List instances owned by the project.

        For example:

        .. literalinclude:: snippets.py
            :start-after: [START bigtable_list_instances]
            :end-before: [END bigtable_list_instances]

        :rtype: tuple
        :returns:
            (instances, failed_locations), where 'instances' is list of
            :class:`google.cloud.bigtable.instance.Instance`, and
            'failed_locations' is a list of locations which could not
            be resolved.
        """
        resp = self.instance_admin_client.list_instances(self.project_path)
        instances = [
            Instance.from_pb(instance, self) for instance in resp.instances]
        return instances, resp.failed_locations

    def list_clusters(self):
        """List the clusters in the project.

        For example:

        .. literalinclude:: snippets.py
            :start-after: [START bigtable_list_clusters_in_project]
            :end-before: [END bigtable_list_clusters_in_project]

        :rtype: tuple
        :returns:
            (clusters, failed_locations), where 'clusters' is list of
            :class:`google.cloud.bigtable.instance.Cluster`, and
            'failed_locations' is a list of strings representing
            locations which could not be resolved.
        """
        # '-' asks the API for clusters across every instance in the project.
        resp = (self.instance_admin_client.list_clusters(
            self.instance_admin_client.instance_path(self.project, '-')))
        clusters = []
        instances = {}
        for cluster in resp.clusters:
            match_cluster_name = _CLUSTER_NAME_RE.match(cluster.name)
            instance_id = match_cluster_name.group('instance')
            if instance_id not in instances:
                instances[instance_id] = self.instance(instance_id)
            clusters.append(Cluster.from_pb(cluster, instances[instance_id]))
        return clusters, resp.failed_locations
| |
"""
pyLDAvis Prepare
===============
Main transformation functions for preparing LDAdata to the visualization's data structures
"""
from __future__ import absolute_import
from past.builtins import basestring
from collections import namedtuple
import json
import logging
from joblib import Parallel, delayed, cpu_count
import numpy as np
import pandas as pd
from scipy.stats import entropy
from scipy.spatial.distance import pdist, squareform
from .utils import NumPyEncoder
try:
from sklearn.manifold import MDS, TSNE
sklearn_present = True
except ImportError:
sklearn_present = False
def __num_dist_rows__(array, ndigits=2):
    """Return how many rows of *array* sum to (approximately) one.

    Used to verify that rows are probability distributions; ``ndigits``
    is accepted for interface compatibility but not used.
    """
    row_sums = pd.DataFrame(array).sum(axis=1)
    deficient = int((row_sums < 0.999).sum())
    return array.shape[0] - deficient
class ValidationError(ValueError):
    """Raised when the user-supplied LDA inputs fail consistency checks."""
def _input_check(topic_term_dists, doc_topic_dists, doc_lengths, vocab, term_frequency):
    """Collect consistency problems among the model inputs.

    Returns a list of human-readable error messages, or ``None`` when
    every check passes.
    """
    tt_shape = topic_term_dists.shape
    dt_shape = doc_topic_dists.shape
    n_terms = len(vocab)
    problems = []

    if dt_shape[1] != tt_shape[0]:
        problems.append('Number of rows of topic_term_dists does not match number of columns of doc_topic_dists; both should be equal to the number of topics in the model.')
    if len(doc_lengths) != dt_shape[0]:
        problems.append('Length of doc_lengths not equal to the number of rows in doc_topic_dists; both should be equal to the number of documents in the data.')
    if tt_shape[1] != n_terms:
        problems.append('Number of terms in vocabulary does not match the number of columns of topic_term_dists (where each row of topic_term_dists is a probability distribution of terms for a given topic).')
    if len(term_frequency) != n_terms:
        problems.append('Length of term_frequency not equal to the number of terms in the vocabulary (len of vocab).')
    if __num_dist_rows__(topic_term_dists) != tt_shape[0]:
        problems.append('Not all rows (distributions) in topic_term_dists sum to 1.')
    if __num_dist_rows__(doc_topic_dists) != dt_shape[0]:
        problems.append('Not all rows (distributions) in doc_topic_dists sum to 1.')

    if problems:
        return problems
def _input_validate(*args):
    """Run :func:`_input_check` and raise :class:`ValidationError` listing
    every problem found; no-op when the inputs are consistent."""
    problems = _input_check(*args)
    if problems:
        bullet_list = '\n'.join(' * ' + p for p in problems)
        raise ValidationError('\n' + bullet_list)
def _jensen_shannon(_P, _Q):
    """Jensen-Shannon divergence between two discrete distributions,
    computed as the mean KL divergence of each against their mixture."""
    mixture = 0.5 * (_P + _Q)
    return 0.5 * (entropy(_P, mixture) + entropy(_Q, mixture))
def _pcoa(pair_dists, n_components=2):
    """Principal Coordinate Analysis, aka Classical Multidimensional Scaling.

    Eigendecomposes the double-centred squared distance matrix and embeds
    the points using the top ``n_components`` eigenpairs.

    Code referenced from skbio.stats.ordination.pcoa:
    https://github.com/biocore/scikit-bio/blob/0.5.0/skbio/stats/ordination/_principal_coordinate_analysis.py
    The pairwise distance matrix is assumed symmetric.
    """
    D = np.asarray(pair_dists, np.float64)
    n = D.shape[0]

    # Double-centre the squared distances (Gower centring).
    centerer = np.eye(n) - np.ones((n, n)) / n
    B = - centerer.dot(D ** 2).dot(centerer) / 2
    eigvals, eigvecs = np.linalg.eig(B)

    # Keep the n_components largest eigenvalues (decreasing order).
    order = eigvals.argsort()[::-1][:n_components]
    eigvals = eigvals[order]
    eigvecs = eigvecs[:, order]

    # Zero out numerically-zero and negative eigenvalues (and their
    # eigenvectors); at least one eigenvalue must be zero.
    eigvals[np.isclose(eigvals, 0)] = 0
    negative = eigvals < 0
    if np.any(negative):
        eigvals[negative] = np.zeros(eigvals[negative].shape)
        eigvecs[:, negative] = np.zeros(eigvecs[:, negative].shape)

    return np.sqrt(eigvals) * eigvecs
def js_PCoA(distributions):
    """Dimension reduction via Jensen-Shannon Divergence & Principal
    Coordinate Analysis (aka Classical Multidimensional Scaling).

    Parameters
    ----------
    distributions : array-like, shape (`n_dists`, `k`)
        Matrix of distributions probabilities.

    Returns
    -------
    pcoa : array, shape (`n_dists`, 2)
    """
    pairwise = pdist(distributions, metric=_jensen_shannon)
    return _pcoa(squareform(pairwise))
def js_MMDS(distributions, **kwargs):
    """Dimension reduction via Jensen-Shannon Divergence & Metric
    Multidimensional Scaling.

    Parameters
    ----------
    distributions : array-like, shape (`n_dists`, `k`)
        Matrix of distributions probabilities.
    **kwargs : Keyword argument to be passed to `sklearn.manifold.MDS()`

    Returns
    -------
    mmds : array, shape (`n_dists`, 2)
    """
    pairwise = pdist(distributions, metric=_jensen_shannon)
    mds_model = MDS(n_components=2, random_state=0,
                    dissimilarity='precomputed', **kwargs)
    return mds_model.fit_transform(squareform(pairwise))
def js_TSNE(distributions, **kwargs):
    """Dimension reduction via Jensen-Shannon Divergence & t-distributed
    Stochastic Neighbor Embedding.

    Parameters
    ----------
    distributions : array-like, shape (`n_dists`, `k`)
        Matrix of distributions probabilities.
    **kwargs : Keyword argument to be passed to `sklearn.manifold.TSNE()`

    Returns
    -------
    tsne : array, shape (`n_dists`, 2)
    """
    pairwise = pdist(distributions, metric=_jensen_shannon)
    tsne_model = TSNE(n_components=2, random_state=0,
                      metric='precomputed', **kwargs)
    return tsne_model.fit_transform(squareform(pairwise))
def _df_with_names(data, index_name, columns_name):
    """Coerce *data* to a DataFrame whose axes carry the given names.

    A DataFrame input is rebuilt from its raw values so that both axes get
    a fresh integer (numbered) index, discarding any labels it arrived with.
    """
    # isinstance (rather than `type(...) ==`) so DataFrame subclasses are
    # also re-indexed instead of silently falling through to the copy
    # constructor, which would preserve their original index.
    if isinstance(data, pd.DataFrame):
        df = pd.DataFrame(data.values)
    else:
        df = pd.DataFrame(data)
    df.index.name = index_name
    df.columns.name = columns_name
    return df
def _series_with_name(data, name):
    """Coerce *data* to a Series called *name* with a numeric index."""
    # isinstance so Series subclasses take the same path; `rename` returns
    # a copy, so the caller's Series is no longer mutated in place (the
    # previous code set `data.name` on the input as a side effect).
    if isinstance(data, pd.Series):
        return data.rename(name).reset_index(drop=True)
    return pd.Series(data, name=name)
def _topic_coordinates(mds, topic_term_dists, topic_proportion):
    """Project topics to 2-D with *mds* and assemble the plotting frame."""
    n_topics = topic_term_dists.shape[0]
    coords = mds(topic_term_dists)
    assert coords.shape == (n_topics, 2)
    # note: cluster (should?) be deprecated soon.
    # See: https://github.com/cpsievert/LDAvis/issues/26
    return pd.DataFrame({'x': coords[:, 0],
                         'y': coords[:, 1],
                         'topics': range(1, n_topics + 1),
                         'cluster': 1,
                         'Freq': topic_proportion * 100})
def _chunks(l, n):
    """Yield successive n-sized chunks from l."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def _job_chunks(l, n_jobs):
    """Split *l* into pieces for parallel dispatch.

    A negative ``n_jobs`` follows the joblib "all cores" convention, so
    the size is derived from ``cpu_count()``.
    """
    size = n_jobs
    if n_jobs < 0:
        # so, have n chunks if we are using all n cores/cpus
        # NOTE(review): the value is passed to _chunks as the chunk *size*,
        # not the chunk count — confirm the intended semantics.
        size = cpu_count() + 1 - n_jobs
    return _chunks(l, size)
def _find_relevance(log_ttd, log_lift, R, lambda_):
    """Rank terms per topic by relevance, keeping the top *R*.

    Relevance blends log probability and log lift weighted by ``lambda_``.
    """
    relevance = lambda_ * log_ttd + (1 - lambda_) * log_lift
    ranked = relevance.T.apply(lambda topic: topic.sort_values(ascending=False).index)
    return ranked.head(R)
def _find_relevance_chunks(log_ttd, log_lift, R, lambda_seq):
    """Stack the per-lambda relevance rankings for every value in *lambda_seq*."""
    frames = [_find_relevance(log_ttd, log_lift, R, lam) for lam in lambda_seq]
    return pd.concat(frames)
def _topic_info(topic_term_dists, topic_proportion, term_frequency, term_topic_freq, vocab, lambda_step, R, n_jobs):
    """Build the term-level table backing the bar charts.

    Combines the saliency-ordered "default" view (no topic selected) with
    the per-topic top-R relevance rankings computed in parallel over the
    grid of lambda values.
    """
    # marginal distribution over terms (width of blue bars)
    term_proportion = term_frequency / term_frequency.sum()

    # compute the distinctiveness and saliency of the terms:
    # this determines the R terms that are displayed when no topic is selected
    topic_given_term = topic_term_dists / topic_term_dists.sum()
    kernel = (topic_given_term * np.log((topic_given_term.T / topic_proportion).T))
    distinctiveness = kernel.sum()
    saliency = term_proportion * distinctiveness

    # Order the terms for the "default" view by decreasing saliency:
    # (`drop(columns=...)` replaces the positional-axis form removed in
    # pandas 2.0.)
    default_term_info = pd.DataFrame({'saliency': saliency, 'Term': vocab,
                                      'Freq': term_frequency, 'Total': term_frequency,
                                      'Category': 'Default'}). \
        sort_values(by='saliency', ascending=False). \
        head(R).drop(columns='saliency')
    # Rounding Freq and Total to integer values to match LDAvis code:
    default_term_info['Freq'] = np.floor(default_term_info['Freq'])
    default_term_info['Total'] = np.floor(default_term_info['Total'])
    ranks = np.arange(R, 0, -1)
    default_term_info['logprob'] = default_term_info['loglift'] = ranks

    # compute relevance and top terms for each topic
    log_lift = np.log(topic_term_dists / term_proportion)
    log_ttd = np.log(topic_term_dists)
    lambda_seq = np.arange(0, 1 + lambda_step, lambda_step)

    def topic_top_term_df(tup):
        # Expand one topic's ranked terms into table rows, relabelling the
        # topic with its new (proportion-sorted) 1-based id.
        new_topic_id, (original_topic_id, topic_terms) = tup
        term_ix = topic_terms.unique()
        return pd.DataFrame({'Term': vocab[term_ix],
                             'Freq': term_topic_freq.loc[original_topic_id, term_ix],
                             'Total': term_frequency[term_ix],
                             'logprob': log_ttd.loc[original_topic_id, term_ix].round(4),
                             'loglift': log_lift.loc[original_topic_id, term_ix].round(4),
                             'Category': 'Topic%d' % new_topic_id})

    top_terms = pd.concat(Parallel(n_jobs=n_jobs)(
        delayed(_find_relevance_chunks)(log_ttd, log_lift, R, ls)
        for ls in _job_chunks(lambda_seq, n_jobs)))
    topic_dfs = map(topic_top_term_df, enumerate(top_terms.T.iterrows(), 1))
    return pd.concat([default_term_info] + list(topic_dfs))
def _token_table(topic_info, term_topic_freq, vocab, term_frequency):
    """Build the per-token table used to size circles when a term is
    highlighted.

    Gathers every unique term that can appear (for any topic and any value
    of lambda) and computes its distribution over topics.
    """
    # term-topic frequency table restricted to the terms that can show up
    term_ix = np.sort(topic_info.index.unique().values)
    top_topic_terms_freq = term_topic_freq[term_ix]

    # re-label the topic axis with the new 1-based ordering
    n_topics = len(term_topic_freq)
    top_topic_terms_freq.index = range(1, n_topics + 1)
    top_topic_terms_freq.index.name = 'Topic'

    # we filter to Freq >= 0.5 to avoid sending too much data to the browser
    token_table = pd.DataFrame({'Freq': top_topic_terms_freq.unstack()})
    token_table = token_table.reset_index().set_index('term')
    token_table = token_table.query('Freq >= 0.5')

    token_table['Freq'] = token_table['Freq'].round()
    token_table['Term'] = vocab[token_table.index.values].values
    # Normalize token frequencies:
    token_table['Freq'] = token_table.Freq / term_frequency[token_table.index]
    return token_table.sort_values(by=['Term', 'Topic'])
def prepare(topic_term_dists, doc_topic_dists, doc_lengths, vocab, term_frequency,
            R=30, lambda_step=0.01, mds=js_PCoA, n_jobs=-1,
            plot_opts=None, sort_topics=True):
    """Transforms the topic model distributions and related corpus data into
    the data structures needed for the visualization.

    Parameters
    ----------
    topic_term_dists : array-like, shape (`n_topics`, `n_terms`)
        Matrix of topic-term probabilities. Where `n_terms` is `len(vocab)`.
    doc_topic_dists : array-like, shape (`n_docs`, `n_topics`)
        Matrix of document-topic probabilities.
    doc_lengths : array-like, shape `n_docs`
        The length of each document, i.e. the number of words in each document.
        The order of the numbers should be consistent with the ordering of the
        docs in `doc_topic_dists`.
    vocab : array-like, shape `n_terms`
        List of all the words in the corpus used to train the model.
    term_frequency : array-like, shape `n_terms`
        The count of each particular term over the entire corpus. The ordering
        of these counts should correspond with `vocab` and `topic_term_dists`.
    R : int
        The number of terms to display in the barcharts of the visualization.
        Default is 30. Recommended to be roughly between 10 and 50.
    lambda_step : float, between 0 and 1
        Determines the interstep distance in the grid of lambda values over
        which to iterate when computing relevance.
        Default is 0.01. Recommended to be between 0.01 and 0.1.
    mds : function or a string representation of function
        A function that takes `topic_term_dists` as an input and outputs a
        `n_topics` by `2` distance matrix. The output approximates the distance
        between topics. See :func:`js_PCoA` for details on the default function.
        A string representation currently accepts `pcoa` (or upper case variant),
        `mmds` (or upper case variant) and `tsne` (or upper case variant),
        if `sklearn` package is installed for the latter two.
    n_jobs : int
        The number of cores to be used to do the computations. The regular
        joblib conventions are followed so `-1`, which is the default, will
        use all cores.
    plot_opts : dict, with keys 'xlab' and `ylab`
        Dictionary of plotting options, right now only used for the axis labels.
        Defaults to ``{'xlab': 'PC1', 'ylab': 'PC2'}``.
    sort_topics : sort topics by topic proportion (percentage of tokens covered). Set to false to
        to keep original topic order.

    Returns
    -------
    prepared_data : PreparedData
        A named tuple containing all the data structures required to create
        the visualization. To be passed on to functions like :func:`display`.

    Notes
    -----
    This implements the method of `Sievert, C. and Shirley, K. (2014):
    LDAvis: A Method for Visualizing and Interpreting Topics, ACL Workshop on
    Interactive Language Learning, Visualization, and Interfaces.`
    http://nlp.stanford.edu/events/illvi2014/papers/sievert-illvi2014.pdf

    See Also
    --------
    :func:`save_json`: save json representation of a figure to file
    :func:`save_html` : save html representation of a figure to file
    :func:`show` : launch a local server and show a figure in a browser
    :func:`display` : embed figure within the IPython notebook
    :func:`enable_notebook` : automatically embed visualizations in IPython notebook
    """
    # Resolve the default here rather than in the signature so a single
    # mutable dict is not shared across calls.
    if plot_opts is None:
        plot_opts = {'xlab': 'PC1', 'ylab': 'PC2'}

    # parse mds
    if isinstance(mds, basestring):
        mds = mds.lower()
        if mds == 'pcoa':
            mds = js_PCoA
        elif mds in ('mmds', 'tsne'):
            if sklearn_present:
                mds_opts = {'mmds': js_MMDS, 'tsne': js_TSNE}
                mds = mds_opts[mds]
            else:
                logging.warning('sklearn not present, switch to PCoA')
                mds = js_PCoA
        else:
            logging.warning('Unknown mds `%s`, switch to PCoA' % mds)
            mds = js_PCoA

    topic_term_dists = _df_with_names(topic_term_dists, 'topic', 'term')
    doc_topic_dists = _df_with_names(doc_topic_dists, 'doc', 'topic')
    term_frequency = _series_with_name(term_frequency, 'term_frequency')
    doc_lengths = _series_with_name(doc_lengths, 'doc_length')
    vocab = _series_with_name(vocab, 'vocab')
    _input_validate(topic_term_dists, doc_topic_dists, doc_lengths, vocab, term_frequency)
    R = min(R, len(vocab))

    topic_freq = (doc_topic_dists.T * doc_lengths).T.sum()
    # topic_freq = np.dot(doc_topic_dists.T, doc_lengths)
    if (sort_topics):
        topic_proportion = (topic_freq / topic_freq.sum()).sort_values(ascending=False)
    else:
        topic_proportion = (topic_freq / topic_freq.sum())

    topic_order = topic_proportion.index
    # reorder all data based on new ordering of topics.
    # `.loc` replaces the removed `.ix` indexer: _df_with_names gives the
    # topic axis a fresh 0..K-1 integer index, so label selection picks
    # exactly the rows the old `.ix` did.
    topic_freq = topic_freq[topic_order]
    topic_term_dists = topic_term_dists.loc[topic_order]
    doc_topic_dists = doc_topic_dists[topic_order]

    # token counts for each term-topic combination (widths of red bars)
    term_topic_freq = (topic_term_dists.T * topic_freq).T
    ## Quick fix for red bar width bug.  We calculate the
    ## term frequencies internally, using the topic term distributions and the
    ## topic frequencies, rather than using the user-supplied term frequencies.
    ## For a detailed discussion, see: https://github.com/cpsievert/LDAvis/pull/41
    term_frequency = np.sum(term_topic_freq, axis=0)

    topic_info = _topic_info(topic_term_dists, topic_proportion, term_frequency,
                             term_topic_freq, vocab, lambda_step, R, n_jobs)
    token_table = _token_table(topic_info, term_topic_freq, vocab, term_frequency)
    topic_coordinates = _topic_coordinates(mds, topic_term_dists, topic_proportion)
    # topics are reported to the client 1-based
    client_topic_order = [x + 1 for x in topic_order]

    return PreparedData(topic_coordinates, topic_info, token_table,
                        R, lambda_step, plot_opts, client_topic_order)
class PreparedData(namedtuple('PreparedData', ['topic_coordinates', 'topic_info', 'token_table',
                                               'R', 'lambda_step', 'plot_opts', 'topic_order'])):
    """Immutable bundle of everything the LDAvis front-end needs.

    Provides serialization helpers that map the pandas frames onto the
    JSON keys expected by the JavaScript visualization.
    """

    def to_dict(self):
        """Return a plain dict using the client-side key names."""
        payload = {}
        payload['mdsDat'] = self.topic_coordinates.to_dict(orient='list')
        payload['tinfo'] = self.topic_info.to_dict(orient='list')
        payload['token.table'] = self.token_table.to_dict(orient='list')
        payload['R'] = self.R
        payload['lambda.step'] = self.lambda_step
        payload['plot.opts'] = self.plot_opts
        payload['topic.order'] = self.topic_order
        return payload

    def to_json(self):
        """Serialize :meth:`to_dict` with numpy-aware encoding."""
        return json.dumps(self.to_dict(), cls=NumPyEncoder)
| |
"""
A silly demonstration of how to use the Apple remote.
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
from pyglet.gl import *
import sys
class MainWindow(pyglet.window.Window):
    """Window that routes Apple Remote button events to a :class:`Carousel`."""

    def __init__(self):
        super().__init__(visible=False)
        self.set_caption('Apple Remote Example')

        # Locate the Apple Remote; without one there is nothing to demo.
        remote = pyglet.input.get_apple_remote()
        if not remote:
            print('Apple IR Remote not available.')
            sys.exit(0)

        # Exclusive mode keeps button presses from reaching the OS
        # (Front Row, volume control, ...) while this program owns the
        # remote.
        remote.open(self, exclusive=True)

        # Push ourselves onto the remote's handler stack so that the
        # on_button_press / on_button_release methods below receive the
        # remote's events.
        remote.push_handlers(self)

        self.carousel = Carousel()
        self.setup_opengl()

        pyglet.clock.schedule_interval(self.update, 1 / 60.0)

    def on_button_press(self, button):
        """Dispatch an Apple Remote button press to the carousel.

        ``button`` is a string naming the button that was pressed.
        """
        print('on_button_press', button)

        actions = {
            'up': self.carousel.scroll_up,
            'down': self.carousel.scroll_down,
            'left': self.carousel.step_left,
            'right': self.carousel.step_right,
            'left_hold': self.carousel.rotate_left,
            'right_hold': self.carousel.rotate_right,
            'select': self.carousel.swap_left,
            'select_hold': self.carousel.swap_left,
            'menu': self.carousel.swap_right,
            'menu_hold': self.carousel.swap_right,
        }
        action = actions.get(button)
        if action is not None:
            action()

    def on_button_release(self, button):
        """Stop continuous rotation when a held direction button is let go.

        ``button`` is a string naming the button that was released.
        """
        print('on_button_release', button)

        if button in ('left_hold', 'right_hold'):
            self.carousel.stop_rotating()

    def on_draw(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        gluLookAt(0, 3, -12, 0, 3, 0, 0, 1, 0)
        self.carousel.draw()

    def on_resize(self, width, height):
        # Rebuild the projection to preserve the scene's aspect ratio.
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        aspect = width / float(height)
        glFrustum(-1, 1, -1.8 / aspect, 0.2 / aspect, 1, 100)
        glMatrixMode(GL_MODELVIEW)
        return pyglet.event.EVENT_HANDLED

    def setup_opengl(self):
        glClearColor(1, 1, 1, 1)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    def update(self, dt):
        # Called by the pyglet clock at ~60 Hz.
        self.carousel.update(dt)
class Carousel:
    """A rotating collection of labeled tiles.

    The carousel tracks its position as a fractional tile index
    (``float_index``); ``update`` advances it each frame and derives the
    display ``angle`` from it.
    """
    def __init__(self):
        self.num_tiles = 14
        self.index = 0                              # integer index of the front tile
        self.float_index = 0.0                      # fractional tile position used for animation
        self.float_increment = 1.0 / self.num_tiles # fraction of a revolution per tile
        self.angle = 0                              # current rotation of the whole carousel, degrees
        self.index_diff = 0                         # remaining tiles to travel before stopping
        self.is_rotating = False                    # True while rotating continuously (hold buttons)
        self.speed = 4 * self.num_tiles             # animation speed factor
        # Create the tiles in the carousel.
        self.tiles = list()
        colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                  (0, 205, 205), (128, 0, 128), (255, 165, 0)]

        # Simple record type: which letter (0-25) and background color a tile shows.
        class Tile:
            value = 0
            color = [255, 255, 255]
        for i in range(self.num_tiles):
            tile = Tile()
            tile.value = i % 26
            tile.color = colors[i % len(colors)]
            self.tiles.append(tile)
        # Create glyphs for the characters displayed on the tiles.
        font = pyglet.font.load('Courier', 64)
        self.glyphs = font.get_glyphs('ABCDEFGHIJKLMNOPQRSTUVWXYZ')

    def scroll_up(self):
        """Increment the character displayed on the main tile."""
        self.tiles[self.index].value = (self.tiles[self.index].value + 1) % 26

    def scroll_down(self):
        """Decrement the character displayed on the main tile."""
        self.tiles[self.index].value = (self.tiles[self.index].value - 1) % 26

    def swap_left(self):
        """Swap the two left tiles."""
        i = self.index
        j = (self.index - 1) % self.num_tiles
        self.tiles[i], self.tiles[j] = self.tiles[j], self.tiles[i]

    def swap_right(self):
        """Swap the two right tiles."""
        i = self.index
        j = (self.index + 1) % self.num_tiles
        self.tiles[i], self.tiles[j] = self.tiles[j], self.tiles[i]

    def step_left(self):
        """Rotate the carousel one tile to the left."""
        self.direction = -1
        self.index_diff += 1.0

    def step_right(self):
        """Rotate the carousel one tile to the right."""
        self.direction = 1
        self.index_diff += 1.0

    def rotate_left(self):
        """Start the carousel rotating continuously to the left."""
        self.is_rotating = True
        self.direction = -1

    def rotate_right(self):
        """Start the carousel rotating continuously to the right."""
        self.is_rotating = True
        self.direction = 1

    def stop_rotating(self):
        """Stop continuous rotation and make sure we end up at a tile location."""
        # Distance (and direction) from the current position to the
        # nearest whole-tile position; update() consumes index_diff.
        self.index_diff = round(self.float_index) - self.float_index
        if self.index_diff < 0:
            self.direction = -1
        else:
            self.direction = 1
        self.index_diff = abs(self.index_diff)

    def draw(self):
        # Rotate the whole carousel, then draw each tile at its slot.
        glPushMatrix()
        glRotatef(-self.angle, 0, 1, 0)
        for i in range(self.num_tiles):
            self.draw_tile(i)
        glPopMatrix()

    def draw_tile(self, index):
        # Angular position of this tile's slot around the carousel.
        angle = index * (360.0 / self.num_tiles)

        glPushMatrix()
        # Move out to the tile's slot, then counter-rotate so the tile
        # face stays oriented with the carousel's current angle.
        glRotatef(angle, 0, 1, 0)
        glTranslatef(0, 0, -7.5)
        glRotatef(-angle + self.angle, 0, 1, 0)

        texture = self.glyphs[self.tiles[index].value].texture

        vertex_list = pyglet.graphics.vertex_list(4, 'v2f',
            ('t3f', texture.tex_coords))
        vertex_list.vertices[:] = [-1, -1, 1, -1, 1, 1, -1, 1]
        # Draw tile background.
        glColor3ub(*self.tiles[index].color)
        vertex_list.draw(GL_QUADS)
        # Draw tile label (glyph texture), nudged toward the viewer so it
        # renders in front of the background quad.
        glBindTexture(texture.target, texture.id)
        glEnable(texture.target)
        glColor3ub(0, 0, 0)
        vertex_list.vertices[:] = [.8, -.8, -.8, -.8, -.8, .8, .8, .8]
        glTranslatef(0, 0, -.01)
        vertex_list.draw(GL_QUADS)
        glDisable(texture.target)
        glPopMatrix()

    def update(self, dt):
        # Advance the animation one frame. While a step is pending
        # (index_diff > 0) consume the distance travelled; when it is
        # exhausted, snap to the nearest tile and stop.
        if self.is_rotating or self.index_diff:
            increment = self.direction * self.speed * self.float_increment * dt
            self.float_index = (self.float_index + increment) % self.num_tiles

            if self.index_diff:
                self.index_diff -= abs(increment)
                if self.index_diff < 0:
                    self.index_diff = 0
                    # Snap exactly onto a tile boundary.
                    self.float_index = round(self.float_index) % self.num_tiles
                    self.index = int(self.float_index)
                    self.is_rotating = False

            self.angle = (self.float_index / self.num_tiles) * 360
# Create the (initially hidden) window, paint one blank frame so nothing
# stale shows on reveal, then make it visible and enter the event loop.
window = MainWindow()
window.clear()
window.flip()
window.set_visible(True)
pyglet.app.run()
| |
# pyOCD debugger
# Copyright (c) 2013-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...coresight.coresight_target import CoreSightTarget
from ..family import target_kinetis
from . import target_MIMXRT1011xxxxx
from . import target_MIMXRT1015xxxxx
from . import target_MIMXRT1021xxxxx
from . import target_MIMXRT1024xxxxx
from . import target_MIMXRT1052xxxxB
from . import target_MIMXRT1062xxxxA
from . import target_MIMXRT1064xxxxA
from . import target_MKE15Z256xxx7
from . import target_MKE18F256xxx16
from . import target_MKL02Z32xxx4
from . import target_MKL05Z32xxx4
from . import target_MKL25Z128xxx4
from . import target_MKL26Z256xxx4
from . import target_MKL27Z256xxx4
from . import target_MKL28Z512xxx7
from . import target_MKL43Z256xxx4
from . import target_MKL46Z256xxx4
from . import target_MKL82Z128xxx7
from . import target_MKV10Z128xxx7
from . import target_MKV11Z128xxx7
from . import target_MKW01Z128xxx4
from . import target_MKW24D512xxx5
from . import target_MKW36Z512xxx4
from . import target_MKW40Z160xxx4
from . import target_MKW41Z512xxx4
from . import target_MK22FN1M0Axxx12
from . import target_MK22FN512xxx12
from . import target_MK28FN2M0xxx15
from . import target_MK64FN1M0xxx12
from . import target_MK66FN2M0xxx18
from . import target_MK82FN256xxx15
from . import target_MK20DX128xxx5
from . import target_K32W042S1M2xxx
from . import target_K32L2B
from . import target_lpc800
from . import target_LPC11U24FBD64_401
from . import target_LPC1768
from . import target_LPC4330
from . import target_nRF51822_xxAA
from . import target_nRF52832_xxAA
from . import target_nRF52840_xxAA
from . import target_STM32F103RC
from . import target_STM32F051T8
from . import target_STM32F412xx
from . import target_STM32F429xx
from . import target_STM32F439xx
from . import target_STM32L475xx
from . import target_STM32L031x6
from . import target_STM32F767xx
from . import target_MAX32600
from . import target_MAX32620
from . import target_MAX32625
from . import target_MAX32630
from . import target_w7500
from . import target_s5js100
from . import target_LPC1114FN28_102
from . import target_LPC824M201JHI33
from . import target_LPC54114J256BD64
from . import target_LPC54608J512ET180
from . import target_ncs36510
from . import target_LPC4088FBD144
from . import target_lpc4088qsb
from . import target_lpc4088dm
from . import target_RTL8195AM
from . import target_CC3220SF
from . import target_CC3220SF
from ..family import target_psoc6
from .cypress import target_CY8C6xxA
from .cypress import target_CY8C6xx7
from .cypress import target_CY8C6xx5
from .cypress import target_CY8C64xx
from .cypress import target_CY8C64xA
from .cypress import target_CY8C64x5
from . import target_musca_a1
from . import target_musca_b1
from . import target_LPC55S69Jxxxxx
from . import target_LPC55S28Jxxxxx
from . import target_M251
from . import target_M261
from . import target_M480
from . import target_HC32F46x
from . import target_HC32F4A0
from . import target_HC32M423
from . import target_HC32x120
from . import target_HC32L110
from . import target_HC32L13x
from . import target_HC32L19x
from . import target_HC32L07x
from . import target_MPS3_AN522
from . import target_MPS3_AN540
## @brief Dictionary of all builtin targets.
#
# Maps the lowercase target-type name (as used on the command line / board
# database) to the target class that implements it.  The target_* modules are
# imported above; entries are grouped roughly by silicon vendor.
BUILTIN_TARGETS = {
    # Arm reference platforms and the generic fallback.
    'mps3_an522': target_MPS3_AN522.AN522,
    'mps3_an540': target_MPS3_AN540.AN540,
    'cortex_m': CoreSightTarget,
    # NXP/Freescale Kinetis family.
    'kinetis': target_kinetis.Kinetis,
    'ke15z7': target_MKE15Z256xxx7.KE15Z7,
    'ke18f16': target_MKE18F256xxx16.KE18F16,
    'kl02z': target_MKL02Z32xxx4.KL02Z,
    'kl05z': target_MKL05Z32xxx4.KL05Z,
    'kl25z': target_MKL25Z128xxx4.KL25Z,
    'kl26z': target_MKL26Z256xxx4.KL26Z,
    'kl27z4': target_MKL27Z256xxx4.KL27Z4,
    'kl28z': target_MKL28Z512xxx7.KL28x,
    'kl43z4': target_MKL43Z256xxx4.KL43Z4,
    'kl46z': target_MKL46Z256xxx4.KL46Z,
    'kl82z7': target_MKL82Z128xxx7.KL82Z7,
    'kv10z7': target_MKV10Z128xxx7.KV10Z7,
    'kv11z7': target_MKV11Z128xxx7.KV11Z7,
    'kw01z4': target_MKW01Z128xxx4.KW01Z4,
    'kw24d5': target_MKW24D512xxx5.KW24D5,
    'kw36z4': target_MKW36Z512xxx4.KW36Z4,
    'kw40z4': target_MKW40Z160xxx4.KW40Z4,
    'kw41z4': target_MKW41Z512xxx4.KW41Z4,
    'k20d50m': target_MK20DX128xxx5.K20D50M,
    'k22fa12': target_MK22FN1M0Axxx12.K22FA12,
    'k22f': target_MK22FN512xxx12.K22F,
    'k28f15': target_MK28FN2M0xxx15.K28F15,
    'k64f': target_MK64FN1M0xxx12.K64F,
    'k66f18': target_MK66FN2M0xxx18.K66F18,
    'k82f25615': target_MK82FN256xxx15.K82F25615,
    'k32w042s': target_K32W042S1M2xxx.K32W042S,
    'k32l2b3': target_K32L2B.K32L2B3,
    # NXP LPC family.
    'lpc800': target_lpc800.LPC800,
    'lpc11u24': target_LPC11U24FBD64_401.LPC11U24,
    'lpc1768': target_LPC1768.LPC1768,
    'lpc4330': target_LPC4330.LPC4330,
    # Maxim Integrated.
    'max32600': target_MAX32600.MAX32600,
    'max32620': target_MAX32620.MAX32620,
    'max32625': target_MAX32625.MAX32625,
    'max32630': target_MAX32630.MAX32630,
    # NXP i.MX RT crossover MCUs.
    'mimxrt1010': target_MIMXRT1011xxxxx.MIMXRT1011xxxxx,
    'mimxrt1015': target_MIMXRT1015xxxxx.MIMXRT1015xxxxx,
    'mimxrt1020': target_MIMXRT1021xxxxx.MIMXRT1021xxxxx,
    'mimxrt1024': target_MIMXRT1024xxxxx.MIMXRT1024xxxxx,
    'mimxrt1050_quadspi': target_MIMXRT1052xxxxB.MIMXRT1052xxxxB_quadspi,
    'mimxrt1050_hyperflash': target_MIMXRT1052xxxxB.MIMXRT1052xxxxB_hyperflash,
    'mimxrt1050': target_MIMXRT1052xxxxB.MIMXRT1052xxxxB_hyperflash, # Alias for default external flash.
    'mimxrt1060': target_MIMXRT1062xxxxA.MIMXRT1062xxxxA,
    'mimxrt1064': target_MIMXRT1064xxxxA.MIMXRT1064xxxxA,
    # Nordic Semiconductor nRF5x.
    'nrf51': target_nRF51822_xxAA.NRF51,
    'nrf52': target_nRF52832_xxAA.NRF52832,
    'nrf52840' : target_nRF52840_xxAA.NRF52840,
    # STMicroelectronics STM32.
    'stm32f103rc': target_STM32F103RC.STM32F103RC,
    'stm32f051': target_STM32F051T8.STM32F051,
    'stm32f412xe' : target_STM32F412xx.STM32F412xE,
    'stm32f412xg' : target_STM32F412xx.STM32F412xG,
    'stm32f429xg' : target_STM32F429xx.STM32F429xG,
    'stm32f429xi' : target_STM32F429xx.STM32F429xI,
    'stm32f439xg' : target_STM32F439xx.STM32F439xG,
    'stm32f439xi' : target_STM32F439xx.STM32F439xI,
    'stm32f767zi' : target_STM32F767xx.STM32F767xx,
    'stm32l475xc' : target_STM32L475xx.STM32L475xC,
    'stm32l475xe' : target_STM32L475xx.STM32L475xE,
    'stm32l475xg' : target_STM32L475xx.STM32L475xG,
    'stm32l031x6' : target_STM32L031x6.STM32L031x6,
    # Miscellaneous vendors.
    'w7500': target_w7500.W7500,
    's5js100': target_s5js100.S5JS100,
    'lpc11xx_32': target_LPC1114FN28_102.LPC11XX_32,
    'lpc824': target_LPC824M201JHI33.LPC824,
    'lpc54114': target_LPC54114J256BD64.LPC54114,
    'lpc54608': target_LPC54608J512ET180.LPC54608,
    'lpc4088': target_LPC4088FBD144.LPC4088,
    'ncs36510': target_ncs36510.NCS36510,
    'lpc4088qsb': target_lpc4088qsb.LPC4088qsb,
    'lpc4088dm': target_lpc4088dm.LPC4088dm,
    'rtl8195am': target_RTL8195AM.RTL8195AM,
    'cc3220sf': target_CC3220SF.CC3220SF,
    # Cypress PSoC 6 family.
    'cy8c6xxa': target_CY8C6xxA.CY8C6xxA,
    'cy8c6xx7': target_CY8C6xx7.CY8C6xx7,
    'cy8c6xx7_s25fs512s': target_CY8C6xx7.CY8C6xx7_S25FS512S,
    'cy8c6xx7_nosmif': target_CY8C6xx7.CY8C6xx7_nosmif,
    'cy8c6xx5': target_CY8C6xx5.CY8C6xx5,
    'cy8c64_sysap': target_psoc6.cy8c64_sysap,
    'cy8c64xx_cm0': target_CY8C64xx.cy8c64xx_cm0,
    'cy8c64xx_cm4': target_CY8C64xx.cy8c64xx_cm4,
    'cy8c64xx_cm0_s25hx512t': target_CY8C64xx.cy8c64xx_cm0_s25hx512t,
    'cy8c64xx_cm4_s25hx512t': target_CY8C64xx.cy8c64xx_cm4_s25hx512t,
    'cy8c64xx_cm0_nosmif': target_CY8C64xx.cy8c64xx_cm0_nosmif,
    'cy8c64xx_cm4_nosmif': target_CY8C64xx.cy8c64xx_cm4_nosmif,
    'cy8c64xa_cm0': target_CY8C64xA.cy8c64xA_cm0,
    'cy8c64xa_cm4': target_CY8C64xA.cy8c64xA_cm4,
    'cy8c64x5_cm0': target_CY8C64x5.cy8c64x5_cm0,
    'cy8c64x5_cm4': target_CY8C64x5.cy8c64x5_cm4,
    # Arm Musca test chips.
    'musca_a1' : target_musca_a1.MuscaA1,
    'musca_b1' : target_musca_b1.MuscaB1,
    'lpc55s69' : target_LPC55S69Jxxxxx.LPC55S69,
    'lpc55s28' : target_LPC55S28Jxxxxx.LPC55S28,
    'cy8c64xx_cm0_full_flash' : target_CY8C64xx.cy8c64xx_cm0_full_flash,
    'cy8c64xx_cm4_full_flash' : target_CY8C64xx.cy8c64xx_cm4_full_flash,
    'cy8c64xa_cm0_full_flash' : target_CY8C64xA.cy8c64xA_cm0_full_flash,
    'cy8c64xa_cm4_full_flash' : target_CY8C64xA.cy8c64xA_cm4_full_flash,
    'cy8c64x5_cm0_full_flash' : target_CY8C64x5.cy8c64x5_cm0_full_flash,
    'cy8c64x5_cm4_full_flash' : target_CY8C64x5.cy8c64x5_cm4_full_flash,
    # Nuvoton.
    'm252kg6ae' : target_M251.M252KG6AE,
    'm263kiaae' : target_M261.M263KIAAE,
    'm487jidae' : target_M480.M487JIDAE,
    # HDSC HC32 family.
    'hc32f46x' : target_HC32F46x.HC32F46x,
    'hc32f4a0xg' : target_HC32F4A0.HC32F4A0xG,
    'hc32f4a0xi' : target_HC32F4A0.HC32F4A0xI,
    'hc32f120x6' : target_HC32x120.HC32F120x6TA,
    'hc32f120x8' : target_HC32x120.HC32F120x8TA,
    'hc32m120' : target_HC32x120.HC32M120,
    'hc32m423' : target_HC32M423.HC32M423,
    'hc32l110' : target_HC32L110.HC32L110,
    'hc32f003' : target_HC32L110.HC32F003,
    'hc32f005' : target_HC32L110.HC32F005,
    'hc32l136' : target_HC32L13x.HC32L136,
    'hc32l130' : target_HC32L13x.HC32L130,
    'hc32f030' : target_HC32L13x.HC32F030,
    'hc32l196' : target_HC32L19x.HC32L196,
    'hc32l190' : target_HC32L19x.HC32L190,
    'hc32f196' : target_HC32L19x.HC32F196,
    'hc32f190' : target_HC32L19x.HC32F190,
    'hc32l072' : target_HC32L07x.HC32L072,
    'hc32l073' : target_HC32L07x.HC32L073,
    'hc32f072' : target_HC32L07x.HC32F072,
    }
| |
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import copy
import os.path
import sys
import unittest
import xml.etree.ElementTree as ET # noqa: What's wrong with ET?
from contrib import mergeBenchmarkSets
from benchexec import result
sys.dont_write_bytecode = True  # prevent creation of .pyc files

# Module-level fixtures: one mock verification-result XML and two mock
# witness XMLs, parsed once and shared by all tests below.  The files live
# next to this test module.
results_xml = ET.parse(  # noqa S314, the XML is trusted
    os.path.join(os.path.dirname(__file__), "mock_results.xml")
).getroot()
witness_xml_1 = ET.parse(  # noqa S314, the XML is trusted
    os.path.join(os.path.dirname(__file__), "mock_witness_1.xml")
).getroot()
witness_xml_2 = ET.parse(  # noqa S314, the XML is trusted
    os.path.join(os.path.dirname(__file__), "mock_witness_2.xml")
).getroot()

# Task-definition paths used as <run name="..."> keys inside the mock XMLs.
files = [
    "../sv-benchmarks/c/array-examples/sanfoundry_24-1.yml",
    "../sv-benchmarks/c/array-examples/data_structures_set_multi_proc_trivial_ground.yml",
    "../sv-benchmarks/c/array-patterns/array28_pattern.yml",
    "../sv-benchmarks/c/reducercommutativity/rangesum05.yml",
    "../sv-benchmarks/c/array-fpi/indp4f.yml",
]
def mock_witness_sets():
    """Collect every <run> element from both mock witness files into a
    single name->element mapping, returned as a one-element list (the
    shape the merge code expects for a list of witness sets).
    """
    merged = {
        run.get("name"): run
        for document in (witness_xml_1, witness_xml_2)
        for run in document.findall("run")
    }
    return [merged]
def mock_get_verification_result(name):
    """Return the <run> element for *name* from the mock verification
    results, or None when no such run exists."""
    query = "run[@name='{}']".format(name)
    return results_xml.find(query)
def mock_get_witness(name):
    """Return the mock witness <run> for *name*.

    Raises NotImplementedError when no mock witness is defined for it.
    """
    found = mock_witness_sets()[0].get(name)
    if found is not None:
        return found
    raise NotImplementedError(name)
def element_trees_equal(et1, et2):
    """Recursively compare two ElementTree elements by tag, attributes and
    children.  Note: element text/tail content is not compared.
    """
    same_shape = (
        len(et1) == len(et2)
        and et1.tag == et2.tag
        and et1.attrib == et2.attrib
    )
    if not same_shape:
        return False
    for child_a, child_b in zip(et1, et2):
        if not element_trees_equal(child_a, child_b):
            return False
    return True
class TestMergeBenchmarkSets(unittest.TestCase):
    """Unit tests for contrib.mergeBenchmarkSets, driven entirely by the
    module-level mock XML fixtures (mock_results.xml, mock_witness_*.xml)."""

    def test_only_elem(self):
        """xml_to_string with only an element must round-trip unchanged."""
        new_results = mergeBenchmarkSets.xml_to_string(results_xml)
        new_witness_1 = mergeBenchmarkSets.xml_to_string(witness_xml_1)
        new_witness_2 = mergeBenchmarkSets.xml_to_string(witness_xml_2)
        self.assertTrue(
            element_trees_equal(
                ET.fromstring(new_results), results_xml  # noqa S314, the XML is trusted
            )
        )
        self.assertTrue(
            element_trees_equal(
                ET.fromstring(new_witness_1),  # noqa S314, the XML is trusted
                witness_xml_1,
            )
        )
        self.assertTrue(
            element_trees_equal(
                ET.fromstring(new_witness_2),  # noqa S314, the XML is trusted
                witness_xml_2,
            )
        )

    def test_set_doctype(self):
        """xml_to_string with doctype arguments must emit the DOCTYPE header
        (lines 2-4 of the output) and keep the element content unchanged."""
        qualified_name = "result"
        public_id = "+//IDN sosy-lab.org//DTD BenchExec result 1.18//EN"
        system_id = "https://www.sosy-lab.org/benchexec/result-1.18.dtd"
        new_results = mergeBenchmarkSets.xml_to_string(
            results_xml, qualified_name, public_id, system_id
        )
        new_witness_1 = mergeBenchmarkSets.xml_to_string(
            witness_xml_1, qualified_name, public_id, system_id
        )
        new_witness_2 = mergeBenchmarkSets.xml_to_string(
            witness_xml_2, qualified_name, public_id, system_id
        )
        self.assertTrue(
            element_trees_equal(
                results_xml, ET.fromstring(new_results)  # noqa S314, the XML is trusted
            )
        )
        self.assertTrue(
            element_trees_equal(
                witness_xml_1,
                ET.fromstring(new_witness_1),  # noqa S314, the XML is trusted
            )
        )
        self.assertTrue(
            element_trees_equal(
                witness_xml_2,
                ET.fromstring(new_witness_2),  # noqa S314, the XML is trusted
            )
        )
        for xml in [new_results, new_witness_1, new_witness_2]:
            self.assertListEqual(
                [line.strip() for line in xml.splitlines()[1:4]],
                [
                    f"<!DOCTYPE {qualified_name}",
                    f"PUBLIC '{public_id}'",
                    f"'{system_id}'>",
                ],
            )

    def test_getWitnesses(self):
        """get_witnesses must index each mock witness file by run name."""
        witness1 = mergeBenchmarkSets.get_witnesses(witness_xml_1)
        witness2 = mergeBenchmarkSets.get_witnesses(witness_xml_2)
        self.assertEqual(3, len(witness1))
        self.assertEqual(2, len(witness2))
        self.assertSetEqual(
            {
                "../sv-benchmarks/c/array-examples/sanfoundry_24-1.yml",
                "../sv-benchmarks/c/array-examples/data_structures_set_multi_proc_trivial_ground.yml",
                "../sv-benchmarks/c/array-patterns/array28_pattern.yml",
            },
            set(witness1.keys()),
        )
        self.assertSetEqual(
            {
                "../sv-benchmarks/c/reducercommutativity/rangesum05.yml",
                "../sv-benchmarks/c/array-fpi/indp4f.yml",
            },
            set(witness2.keys()),
        )

    def test_getWitnessResult_no_witness(self):
        """A missing witness yields ('witness missing', error category)."""
        self.assertEqual(
            ("witness missing", result.CATEGORY_ERROR),
            mergeBenchmarkSets.get_witness_result(None, None),
        )
        self.assertEqual(
            ("witness missing", result.CATEGORY_ERROR),
            mergeBenchmarkSets.get_witness_result(None, results_xml.find("run")),
        )

    def test_getWitnessResult_no_verification_result(self):
        """Without a verification result the outcome is 'not found'; whether
        the result or the witness is reported invalid depends on the task."""
        for file in files[:-1]:
            self.assertEqual(
                ("result invalid (not found)", result.CATEGORY_ERROR),
                mergeBenchmarkSets.get_witness_result(mock_get_witness(file), None),
            )
        self.assertEqual(
            ("witness invalid (not found)", result.CATEGORY_ERROR),
            mergeBenchmarkSets.get_witness_result(mock_get_witness(files[-1]), None),
        )

    def test_getWitnessResult(self):
        """Expected (status, category) per mock task when both witness and
        verification result are present."""
        expected_results = [
            ("true", result.CATEGORY_CORRECT_UNCONFIRMED),
            ("result invalid (TIMEOUT)", result.CATEGORY_ERROR),
            ("result invalid (false(unreach-call))", result.CATEGORY_ERROR),
            ("false(unreach-call)", result.CATEGORY_CORRECT),
            ("witness invalid (false(unreach-call))", result.CATEGORY_ERROR),
        ]
        for expected, file in zip(expected_results, files):
            self.assertEqual(
                expected,
                mergeBenchmarkSets.get_witness_result(
                    mock_get_witness(file), mock_get_verification_result(file)
                ),
            )

    def test_getValidationResult_single_witness(self):
        """With a single witness set, the first two tuple slots carry the
        witness verdict and the last two echo the verification verdict."""
        expected_results = [
            ("true", result.CATEGORY_CORRECT_UNCONFIRMED),
            ("result invalid (TIMEOUT)", result.CATEGORY_ERROR),
            ("result invalid (false(unreach-call))", result.CATEGORY_ERROR),
            ("false(unreach-call)", result.CATEGORY_CORRECT),
            ("witness invalid (false(unreach-call))", result.CATEGORY_ERROR),
        ]
        for expected, file in zip(expected_results, files):
            run = mock_get_verification_result(file)
            status_from_verification = run.find('column[@title="status"]').get("value")
            category_from_verification = run.find('column[@title="category"]').get(
                "value"
            )
            actual = mergeBenchmarkSets.get_validation_result(
                run,
                mock_witness_sets(),
                status_from_verification,
                category_from_verification,
            )
            self.assertEqual(expected, actual[:2])
            self.assertEqual(
                (status_from_verification, category_from_verification), actual[2:]
            )

    def test_getValidationResult_multiple_witnesses(self):
        """A second witness set (with mutated verdicts) must be combined with
        the first when computing the overall validation result."""
        new_witness_results = [
            ("ERROR (invalid witness syntax)", result.CATEGORY_ERROR),
            ("ERROR (invalid witness file)", result.CATEGORY_ERROR),
            ("false (unreach-call)", result.CATEGORY_WRONG),
            ("true", result.CATEGORY_WRONG),
            ("false (unreach-call)", result.CATEGORY_CORRECT),
        ]
        expected_results = [
            ("witness invalid (true)", result.CATEGORY_ERROR),
            ("result invalid (TIMEOUT)", result.CATEGORY_ERROR),
            ("result invalid (false(unreach-call))", result.CATEGORY_ERROR),
            ("false(unreach-call)", result.CATEGORY_CORRECT),
            ("witness invalid (false(unreach-call))", result.CATEGORY_ERROR),
        ]
        witness_set_1 = mock_witness_sets()
        # Deep copy so mutating the second set leaves the fixtures intact.
        witness_set_2 = copy.deepcopy(witness_set_1)
        for expected, file, new_witness_result in zip(
            expected_results, files, new_witness_results
        ):
            verification_run = mock_get_verification_result(file)
            witness_run = witness_set_2[0].get(file)
            witness_run.find('column[@title="status"]').set(
                "value", new_witness_result[0]
            )
            witness_run.find('column[@title="category"]').set(
                "value", new_witness_result[1]
            )
            status_from_verification = verification_run.find(
                'column[@title="status"]'
            ).get("value")
            category_from_verification = verification_run.find(
                'column[@title="category"]'
            ).get("value")
            actual = mergeBenchmarkSets.get_validation_result(
                verification_run,
                witness_set_1 + [{file: witness_run}],
                status_from_verification,
                category_from_verification,
            )
            self.assertEqual(expected, actual[:2])
            self.assertEqual(
                (status_from_verification, category_from_verification), actual[2:]
            )

    def test_getValidationResult_coverage_error_call(self):
        """For coverage-error-call tasks, only a confirmed false(unreach-call)
        witness contributes a verdict (and adds a score column)."""
        expected_results = [
            (None, None),
            (None, None),
            ("false(unreach-call)", result.CATEGORY_CORRECT),
            (None, None),
            (None, None),
        ]
        for expected, file in zip(expected_results, files):
            run = copy.deepcopy(mock_get_verification_result(file))
            run.set("properties", "coverage-error-call")
            status_from_verification = run.find('column[@title="status"]').get("value")
            category_from_verification = run.find('column[@title="category"]').get(
                "value"
            )
            actual = mergeBenchmarkSets.get_validation_result(
                run,
                mock_witness_sets(),
                status_from_verification,
                category_from_verification,
            )
            self.assertEqual(expected, actual[:2])
            self.assertEqual(status_from_verification, actual[2])
            if file == "../sv-benchmarks/c/array-patterns/array28_pattern.yml":
                self.assertEqual(result.CATEGORY_CORRECT, actual[3])
                self.assertNotEqual(None, run.find('column[@title="score"]'))
            else:
                self.assertEqual(category_from_verification, actual[3])

    def test_getValidationResult_coverage_branches(self):
        """For coverage-branches tasks every run is counted correct and gains
        a score column."""
        for file in files:
            run = copy.deepcopy(mock_get_verification_result(file))
            run.set("properties", "coverage-branches")
            status_from_verification = run.find('column[@title="status"]').get("value")
            category_from_verification = run.find('column[@title="category"]').get(
                "value"
            )
            actual = mergeBenchmarkSets.get_validation_result(
                run,
                mock_witness_sets(),
                status_from_verification,
                category_from_verification,
            )
            self.assertTupleEqual(
                (
                    status_from_verification,
                    result.CATEGORY_CORRECT,
                    status_from_verification,
                    result.CATEGORY_CORRECT,
                ),
                actual,
            )
            self.assertNotEqual(None, run.find('column[@title="score"]'))

    def test_merge_no_witness(self):
        """merge() without witnesses only strips the logfile attributes."""
        results_xml_cp1 = copy.deepcopy(results_xml)
        results_xml_cp2 = copy.deepcopy(results_xml)
        mergeBenchmarkSets.merge(results_xml_cp2, [], True)
        for run in results_xml_cp1.findall("run"):
            del run.attrib["logfile"]
        self.assertEqual(ET.tostring(results_xml_cp1), ET.tostring(results_xml_cp2))

    def test_merge(self):
        """merge() with overwrite=True replaces status/category per witness."""
        expected_results = [
            ("true", result.CATEGORY_CORRECT_UNCONFIRMED),
            ("false(unreach-call)", result.CATEGORY_CORRECT),
            ("TIMEOUT", result.CATEGORY_ERROR),
            ("witness invalid (false(unreach-call))", result.CATEGORY_ERROR),
            ("false(unreach-call)", result.CATEGORY_WRONG),
        ]
        results_xml_cp = copy.deepcopy(results_xml)
        mergeBenchmarkSets.merge(results_xml_cp, mock_witness_sets(), True)
        for expected, run in zip(expected_results, results_xml_cp.findall("run")):
            status = run.find('column[@title="status"]').get("value")
            category = run.find('column[@title="category"]').get("value")
            self.assertTupleEqual(expected, (status, category))

    def test_merge_no_overwrite(self):
        """merge() with overwrite=False keeps already-correct verdicts (first
        task stays CATEGORY_CORRECT instead of being downgraded)."""
        expected_results = [
            ("true", result.CATEGORY_CORRECT),
            ("false(unreach-call)", result.CATEGORY_CORRECT),
            ("TIMEOUT", result.CATEGORY_ERROR),
            ("witness invalid (false(unreach-call))", result.CATEGORY_ERROR),
            ("false(unreach-call)", result.CATEGORY_WRONG),
        ]
        results_xml_cp = copy.deepcopy(results_xml)
        mergeBenchmarkSets.merge(results_xml_cp, mock_witness_sets(), False)
        for expected, run in zip(expected_results, results_xml_cp.findall("run")):
            status = run.find('column[@title="status"]').get("value")
            category = run.find('column[@title="category"]').get("value")
            self.assertTupleEqual(expected, (status, category))
| |
from django.db import models
from django import forms
import datetime
from django.template.defaultfilters import slugify
from django.conf import settings
# Season used for "current" lookups throughout the models; overridable via
# settings.CURRENT_SEASON, defaulting to the calendar year.
CURRENT_SEASON = getattr(settings, 'CURRENT_SEASON', datetime.date.today().year)

# Player academic year.
STATUS_CHOICES = (
    ('FR', 'Freshman'),
    ('SO', 'Sophomore'),
    ('JR', 'Junior'),
    ('SR', 'Senior'),
)
# Unit a position belongs to.
POSITION_TYPE_CHOICES = (
    ('O', 'Offense'),
    ('D', 'Defense'),
    ('S', 'Special Teams'),
)
# Which side of the field a yard line refers to.
SIDE_CHOICES = (
    ('O', 'Own'),
    ('P', 'Opponents'),
)
RESULT_CHOICES = (
    ('W', 'Win'),
    ('L', 'Loss'),
    ('T', 'Tie'),
)
GAME_TYPE_CHOICES = (
    ('H', 'Home'),
    ('A', 'Away'),
    ('N', 'Neutral Site'),
)
# Play types used in play-by-play records.
PLAY_CHOICES = (
    ('R', 'Run'),
    ('P', 'Pass'),
    ('F', 'Field Goal'),
    ('X', 'Extra Point'),
    ('N', 'Penalty'),
    ('K', 'Kickoff'),
    ('U', 'Punt'),
    ('T', 'Turnover'),
)
# NCAA divisions.
DIVISION_CHOICES = (
    ('B', 'Bowl Subdivision'),
    ('C', 'Championship Subdivision'),
    ('D', 'Division II'),
    ('T', 'Division III'),
)
class State(models.Model):
    """A U.S. state, keyed by its two-letter postal abbreviation."""
    id = models.CharField(max_length=2, editable=False, primary_key=True)
    name = models.CharField(max_length=50)

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        return "/states/%s/" % self.id.lower()
class StateForm(forms.Form):
    """Single-field form: a dropdown of all states ordered by name."""
    name = forms.ModelChoiceField(queryset=State.objects.all().order_by('name'))
class City(models.Model):
    """A city, optionally attached to a state."""
    name = models.CharField(max_length=75)
    slug = models.SlugField(max_length=75)
    state = models.ForeignKey(State, null=True, blank=True)

    def __unicode__(self):
        if self.state:
            return "%s, %s" % (self.name, self.state.id)
        else:
            return self.name

    def get_absolute_url(self):
        # NOTE(review): assumes state is set; a state-less city raises
        # AttributeError here -- confirm callers only link stateful cities.
        return "/states/%s/%s/" % (self.state.id.lower(), self.slug)

    class Meta:
        verbose_name_plural = 'cities'
class Week(models.Model):
    """One week of a season, bounded by its end date."""
    season = models.IntegerField()
    week_num = models.IntegerField()
    end_date = models.DateField()

    def __unicode__(self):
        return "Week %s, %s" % (self.week_num, self.season)

    def week_games_url(self):
        return "/seasons/%s/week/%s/" % (self.season, self.week_num)
class Conference(models.Model):
    """An athletic conference (e.g. an abbreviation plus full name)."""
    abbrev = models.CharField(max_length=10)
    name = models.CharField(max_length=90)

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        return '/conferences/%s/' % self.abbrev.lower()
class College(models.Model):
    """A school fielding a football team."""
    name = models.CharField(max_length=90)
    slug = models.SlugField(max_length=90)
    drive_slug = models.CharField(max_length=90)
    # city = models.ForeignKey(City, blank=True) #
    state = models.ForeignKey(State, blank=True)
    official_url = models.CharField(max_length=120, blank=True)
    official_rss = models.CharField(max_length=120, blank=True)
    updated = models.BooleanField()

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        return '/teams/%s/' % self.slug

    def current_record(self):
        """Return this college's current-season record as "(W-L)".

        Bug fix: the original queried
        ``self.collegeyear_set.get(season=datetime.date.today()).year`` --
        filtering the integer ``season`` field by a *date* object and then
        reading a nonexistent ``.year`` attribute off the CollegeYear row,
        so it could never return.  Use the module-wide CURRENT_SEASON
        constant, consistent with Coach.current_school().
        """
        current_season = self.collegeyear_set.get(season=CURRENT_SEASON)
        return "(%d-%d)" % (current_season.wins, current_season.losses)

    class Meta:
        ordering = ['name', 'state']
class CollegeYear(models.Model):
    """One college's season: win/loss record, roster class counts, and
    conference affiliation."""
    college = models.ForeignKey(College)
    season = models.IntegerField()
    wins = models.IntegerField(default=0)
    losses = models.IntegerField(default=0)
    ties = models.IntegerField(default=0)
    conference_wins = models.IntegerField(default=0)
    conference_losses = models.IntegerField(default=0)
    conference_ties = models.IntegerField(default=0)
    # Roster counts by academic class.
    freshmen = models.IntegerField(default=0)
    sophomores = models.IntegerField(default=0)
    juniors = models.IntegerField(default=0)
    seniors = models.IntegerField(default=0)
    conference = models.ForeignKey(Conference, null=True, blank=True)
    division = models.CharField(max_length=1, choices=DIVISION_CHOICES)

    def __unicode__(self):
        return "%s - %s" % (self.college.name, str(self.season))

    def game_count(self):
        # Total games played, derived from the record.
        return self.wins+self.losses+self.ties

    def get_ncaa_week_url(self):
        return 'http://web1.ncaa.org/football/exec/rankingSummary?year=%d&org=%d&week=' % (self.season, self.college.id)

    def get_absolute_url(self):
        return "/teams/%s/%s/" % (self.college.slug, self.season)

    def get_conference_url(self):
        # Returns None implicitly when the team has no conference.
        if self.conference:
            return "/conferences/%s/%s/" % (self.conference.abbrev, self.season)

    def coaching_staff_url(self):
        return self.get_absolute_url()+'coaches/'

    def record(self):
        """Overall record as "W-L", or "W-L-T" when there are ties."""
        if self.ties:
            return "%s-%s-%s" % (self.wins, self.losses, self.ties)
        else:
            return "%s-%s" % (self.wins, self.losses)

    def conference_record(self):
        """Conference record in the same format as record()."""
        if self.conference_ties:
            return "%s-%s-%s" % (self.conference_wins, self.conference_losses, self.conference_ties)
        else:
            return "%s-%s" % (self.conference_wins, self.conference_losses)

    def coach_total(self):
        # Coaches still on staff (no end_date).
        # NOTE(review): len() fetches every row; .count() would be cheaper.
        return len(self.collegecoach_set.filter(end_date__isnull=True))

    class Meta:
        ordering = ['college', '-season']
class Coach(models.Model):
    """A coach and his aggregate career numbers (since 2000)."""
    ncaa_name = models.CharField(max_length=90)
    first_name = models.CharField(max_length=75)
    last_name = models.CharField(max_length=75)
    slug = models.CharField(max_length=75, editable=False)
    # Alma mater, not the current employer (see current_school()).
    college = models.ForeignKey(College, null=True, blank=True, related_name='School')
    grad_year = models.IntegerField(null=True, blank=True)
    birth_date = models.DateField(null=True, blank=True)
    years = models.IntegerField(default=0, blank=True)
    wins = models.IntegerField(default=0, blank=True)
    losses = models.IntegerField(default=0, blank=True)
    ties = models.IntegerField(default=0, blank=True)

    def __unicode__(self):
        return self.first_name + " " + self.last_name

    def save(self, *args, **kwargs):
        """Save twice: once to obtain a primary key, then again to persist
        the id-based slug.  Now accepts and forwards Django's save()
        arguments (force_insert, using, ...), which the original
        zero-argument signature silently dropped."""
        super(Coach, self).save(*args, **kwargs)
        self.slug = '%s-%s-%s' % (str(self.id), slugify(self.first_name), slugify(self.last_name))
        super(Coach, self).save()

    def get_absolute_url(self):
        return '/coaches/detail/%s/' % self.slug

    def full_name(self):
        return self.first_name + " " + self.last_name

    def current_school(self):
        """College where the coach holds an open job in CURRENT_SEASON,
        or None.  The original used a bare ``except:``; narrowed so the
        intent (missing/ambiguous row -> None) is explicit."""
        try:
            return self.collegecoach_set.get(
                collegeyear__season__exact=CURRENT_SEASON, end_date=None
            ).collegeyear.college
        except Exception:  # expected: DoesNotExist / MultipleObjectsReturned
            return None

    def seasons_at_school(self, school):
        """Sorted seasons the coach spent at *school*, wrapped in a list."""
        return [sorted([cc.collegeyear.season for cc in self.collegecoach_set.all() if cc.collegeyear.college == school])]

    def seasons_at_current_school(self):
        """Seasons spent at the current school; 0 when unemployed (the
        original crashed dereferencing ``None.id`` in that case)."""
        school = self.current_school()
        if school is None:
            return 0
        return len([cc for cc in self.collegecoach_set.all()
                    if cc.collegeyear.college.id == school.id])

    def current_job(self):
        """Job title(s) held at the current school, or None.

        Bug fix: the original filtered on
        ``collegeyear__college=self.current_school`` -- passing the *bound
        method* instead of its return value, so the filter compared against
        a callable rather than a College.
        """
        school = self.current_school()
        if school:
            return (self.collegecoach_set.filter(collegeyear__college=school)
                    .order_by('start_date')[0].jobs_display())
        return None

    def head_coach_experience(self):
        """"Yes" if any stint includes the head-coach job, else "No".

        Job id 1 is treated as head coach, per the original logic --
        confirm against the CoachingJob rows.
        """
        for stint in self.collegecoach_set.all():
            if any(job.id == 1 for job in stint.jobs.all()):
                return "Yes"
        return "No"

    def years_since_2000(self):
        return self.collegecoach_set.all().count()

    def years_at_alma_mater_since_2000(self):
        return len([a for a in self.collegecoach_set.all() if self.college == a.collegeyear.college])

    def states_coached_in(self):
        """Distinct states coached in, as {state_id: 500}.  The constant 500
        value is preserved from the original -- presumably a map-shading
        weight; verify with callers before changing."""
        states = {}
        for stint in self.collegecoach_set.all():
            states.setdefault(stint.collegeyear.college.state.id, 500)
        return states

    def coaching_peers(self):
        """Other coaches who shared a staff (CollegeYear) with this one."""
        from django.db import connection
        cursor = connection.cursor()
        year_ids = [str(c.collegeyear.id) for c in self.collegecoach_set.all()]
        if not year_ids:
            return Coach.objects.none()
        # The interpolated ids are our own integer primary keys, so string
        # formatting is safe here (no untrusted input).
        cursor.execute("SELECT distinct college_coach.id FROM college_coach INNER JOIN college_collegecoach ON college_coach.id=college_collegecoach.coach_id WHERE college_collegecoach.collegeyear_id IN (%s)" % ','.join(year_ids))
        ids = [row[0] for row in cursor.fetchall()]
        return Coach.objects.filter(id__in=ids).exclude(id=self.id)

    class Meta:
        ordering = ['last_name', 'first_name']
        verbose_name_plural = 'Coaches'
class CoachForm(forms.Form):
    """Free-text coach search form (searches by last name)."""
    name = forms.CharField(max_length=50, initial='Last name')
class CoachDetailForm(forms.Form):
    """Coach picker whose choices are supplied per-request by the caller."""
    coaches = forms.ModelChoiceField(queryset=Coach.objects.none())

    def __init__(self, coaches, *args, **kwargs):
        # The queryset is injected at construction time rather than fixed
        # at class definition.
        super(CoachDetailForm, self).__init__(*args, **kwargs)
        self.fields["coaches"].queryset = coaches
class CoachingJob(models.Model):
    """A job title on a coaching staff (e.g. head coach, coordinator)."""
    name = models.CharField(max_length=75)
    slug = models.SlugField(max_length=75)

    def __unicode__(self):
        return self.name
class CollegeCoach(models.Model):
    """A coach's stint on one school's staff during one season."""
    coach = models.ForeignKey(Coach)
    collegeyear = models.ForeignKey(CollegeYear)
    jobs = models.ManyToManyField(CoachingJob)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    is_head_coach = models.BooleanField(default=False)

    def __unicode__(self):
        return "%s: %s" % (self.coach, self.collegeyear)

    def get_absolute_url(self):
        return self.coach.get_absolute_url()

    def jobs_display(self):
        """Comma-separated list of job titles for this stint."""
        return ", ".join([x.name for x in self.jobs.all()])

    def is_current_job(self):
        """True when the stint is in the current season and still open."""
        return self.collegeyear.season == CURRENT_SEASON and self.end_date is None

    def partial_season(self):
        """True when the coach departed mid-stint (an end_date is set).

        Bug fix: the original tested the bare name ``end_date``, which
        raised NameError on every call; it must read ``self.end_date``.
        """
        return self.end_date is not None

    def feed_date(self):
        """Date to show in a feed: the departure date when set, otherwise
        the hire date; None when neither is recorded."""
        if self.end_date:
            return self.end_date
        return self.start_date

    def feed_action(self):
        # Returns None implicitly when neither date is recorded.
        if self.start_date and self.end_date:
            return "Departed"
        elif self.start_date:
            return "Hired"
        elif self.end_date:
            return "Departed"

    class Meta:
        ordering = ['coach__last_name','-collegeyear__season']
        verbose_name_plural = 'College coaches'
class CollegeTotal(models.Model):
    """Season-level team statistical totals for one college."""
    college = models.ForeignKey(College)
    season = models.IntegerField()
    # Down conversions.
    third_down_attempts = models.IntegerField(default=0)
    third_down_conversions = models.IntegerField(default=0)
    fourth_down_attempts = models.IntegerField(default=0)
    fourth_down_conversions = models.IntegerField(default=0)
    first_downs_rushing = models.IntegerField(default=0)
    first_downs_passing = models.IntegerField(default=0)
    first_downs_penalty = models.IntegerField(default=0)
    first_downs_total = models.IntegerField(default=0)
    penalties = models.IntegerField(default=0)
    penalty_yards = models.IntegerField(default=0)
    fumbles = models.IntegerField(default=0)
    fumbles_lost = models.IntegerField(default=0)
    # Rushing.
    rushes = models.IntegerField(default=0)
    rush_gain = models.IntegerField(default=0)
    rush_loss = models.IntegerField(default=0)
    rush_net = models.IntegerField(default=0)
    rush_touchdowns = models.IntegerField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    # Passing and receiving.
    pass_attempts = models.IntegerField(default=0)
    pass_completions = models.IntegerField(default=0)
    pass_interceptions = models.IntegerField(default=0)
    pass_yards = models.IntegerField(default=0)
    pass_touchdowns = models.IntegerField(default=0)
    receptions = models.IntegerField(default=0)
    receiving_yards = models.IntegerField(default=0)
    receiving_touchdowns = models.IntegerField(default=0)
    # Kicking game and returns.
    punts = models.IntegerField(default=0)
    punt_yards = models.IntegerField(default=0)
    punt_returns = models.IntegerField(default=0)
    punt_return_yards = models.IntegerField(default=0)
    punt_return_touchdowns = models.IntegerField(default=0)
    kickoff_returns = models.IntegerField(default=0)
    kickoff_return_yards = models.IntegerField(default=0)
    kickoff_return_touchdowns = models.IntegerField(default=0)
    # Scoring.
    touchdowns = models.IntegerField(default=0)
    pat_attempts = models.IntegerField(default=0)
    pat_made = models.IntegerField(default=0)
    two_point_conversion_attempts = models.IntegerField(default=0)
    two_point_conversions = models.IntegerField(default=0)
    field_goal_attempts = models.IntegerField(default=0)
    field_goals_made = models.IntegerField(default=0)
    points = models.IntegerField(default=0)
class Position(models.Model):
    """A player position (abbreviation, names, and unit)."""
    abbrev = models.CharField(max_length=5)
    name = models.CharField(max_length=25)
    plural_name = models.CharField(max_length=25)
    position_type = models.CharField(max_length=1, choices=POSITION_TYPE_CHOICES)

    def __unicode__(self):
        return self.abbrev

    def get_absolute_url(self):
        return '/recruits/positions/%s/' % self.abbrev.lower()
class BowlGame(models.Model):
    """A bowl game and its host city."""
    name = models.CharField(max_length=75)
    slug = models.CharField(max_length=75)
    city = models.ForeignKey(City)

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        return '/bowl-games/%s/' % self.slug
class Game(models.Model):
    """A single game between two CollegeYear teams.

    Fields prefixed t1_/team1 describe the game from team1's perspective
    (team1 appears to be the home/reference team given get_ncaa_drive_url's
    h=team1 parameter -- confirm against the loaders).
    """
    season = models.IntegerField()
    team1 = models.ForeignKey(CollegeYear, related_name='team1')
    coach1 = models.ForeignKey(Coach, null=True, blank=True, related_name='first_coach')
    team2 = models.ForeignKey(CollegeYear, related_name='team2')
    coach2 = models.ForeignKey(Coach, null=True, blank=True, related_name='second_coach')
    date = models.DateField()
    week = models.ForeignKey(Week)
    t1_game_type = models.CharField(max_length=1, choices=GAME_TYPE_CHOICES)
    t1_result = models.CharField(max_length=1, choices=RESULT_CHOICES, blank=True)
    team1_score = models.IntegerField(null=True, blank=True)
    team2_score = models.IntegerField(null=True, blank=True)
    site = models.CharField(max_length=90, blank=True)
    attendance = models.IntegerField(null=True, blank=True)
    overtime = models.CharField(max_length=5, blank=True)
    ncaa_xml = models.CharField(max_length=120, blank=True)
    duration = models.TimeField(null=True, blank=True)
    # Flags recording which ancillary data has been loaded for this game.
    has_drives = models.BooleanField()
    has_stats = models.BooleanField()
    has_player_stats = models.BooleanField()
    has_plays = models.BooleanField()
    is_conference_game = models.BooleanField()
    is_bowl_game = models.BooleanField()
    bowl_game = models.ForeignKey(BowlGame, null=True, blank=True)

    def __unicode__(self):
        return '%s vs. %s, %s' % (self.team1, self.team2, self.date)

    def team1_name(self):
        return self.team1.college.name

    def team2_name(self):
        return self.team2.college.name

    def get_absolute_url(self):
        return '/teams/%s/vs/%s/%s/%s/%s/' % (self.team1.college.slug, self.team2.college.slug, self.date.year, self.date.month, self.date.day)

    def get_matchup_url(self):
        return '/teams/%s/vs/%s/' % (self.team1.college.slug, self.team2.college.slug)

    def get_reverse_url(self):
        # Same game from team2's perspective.
        return '/teams/%s/vs/%s/%s/%s/%s/' % (self.team2.college.slug, self.team1.college.slug, self.date.year, self.date.month, self.date.day)

    def get_ncaa_xml_url(self):
        return 'http://web1.ncaa.org/d1mfb/%s/Internet/worksheets/%s.xml' % (self.season, self.ncaa_xml.strip())

    def get_ncaa_drive_url(self):
        # NOTE(review): mixes team1.college.id with team2.id (the
        # CollegeYear pk) -- the scoring/play URLs below do the same;
        # confirm which id the NCAA endpoint expects.
        return "http://web1.ncaa.org/mfb/driveSummary.jsp?acadyr=%s&h=%s&v=%s&date=%s&game=%s" % (self.season, self.team1.college.id, self.team2.id, self.date.strftime("%d-%b-%y").upper(), self.ncaa_xml.strip())

    def get_ncaa_scoring_url(self):
        return "http://web1.ncaa.org/mfb/scoreSummary.jsp?acadyr=%s&h=%s&v=%s&date=%s&game=%s" % (self.season, self.team1.college.id, self.team2.id, self.date.strftime("%d-%b-%y").upper(), self.ncaa_xml.strip())

    def get_play_by_play_url(self):
        return "http://web1.ncaa.org/mfb/driveSummary.jsp?expand=A&acadyr=%s&h=%s&v=%s&date=%s&game=%s" % (self.season, self.team1.college.id, self.team2.id, self.date.strftime("%d-%b-%y").upper(), self.ncaa_xml.strip())

    def margin(self):
        # Point margin from team1's perspective.
        # NOTE(review): raises TypeError when either score is still NULL
        # (unplayed game) -- confirm callers only use it on final scores.
        return self.team1_score-self.team2_score

    def display(self):
        """Score line with the winner listed first."""
        if self.margin() > 0:
            return "%s %s, %s %s" % (self.team1.college, self.team1_score, self.team2.college, self.team2_score)
        else:
            return "%s %s, %s %s" % (self.team2.college, self.team2_score, self.team1.college, self.team1_score)
class QuarterScore(models.Model):
    """A team's scoring during one quarter of a game (OT periods begin
    with 5).  Not implemented yet."""
    game = models.ForeignKey(Game)
    team = models.ForeignKey(CollegeYear)
    season = models.IntegerField()
    # NOTE(review): default=CURRENT_SEASON for a *quarter* number looks like
    # a copy/paste slip (a year as a quarter) -- confirm before relying on
    # this default; kept unchanged to avoid a silent schema change.
    quarter = models.IntegerField(default=CURRENT_SEASON)
    points = models.PositiveIntegerField(default=0)

    def __unicode__(self):
        # Bug fix: the original lacked the '%' operator
        # ('return "%s - %s" (self.team, self.quarter)'), which tried to
        # *call* the string and raised TypeError.
        return "%s - %s" % (self.team, self.quarter)
class GameScore(models.Model):
    """A single scoring play in a game, stored as a textual description."""
    game = models.ForeignKey(Game)
    team = models.ForeignKey(CollegeYear)
    season = models.IntegerField()
    description = models.CharField(max_length=255)
    def __unicode__(self):
        return self.description
class DriveOutcome(models.Model):
    """Lookup table of how a drive can end (e.g. punt, TD), keyed by abbreviation."""
    abbrev = models.CharField(max_length=10)
    name = models.CharField(max_length=50, null=True)
    slug = models.SlugField(max_length=50, null=True)
    def __unicode__(self):
        # NOTE: name is nullable, so this can return None for rows that
        # only carry an abbreviation.
        return self.name
class GameDrive(models.Model):
    """One offensive drive in a game: how/where it started, how it ended."""
    season = models.IntegerField()
    game = models.ForeignKey(Game)
    team = models.ForeignKey(CollegeYear)
    # Sequential drive number within the game for this team.
    drive = models.IntegerField()
    quarter = models.PositiveSmallIntegerField()
    # How the drive began (e.g. kickoff, punt, interception).
    start_how = models.CharField(max_length=25)
    start_time = models.TimeField()
    # Field position as a yard line plus which side of the field.
    start_position = models.IntegerField()
    start_side = models.CharField(max_length=1, choices=SIDE_CHOICES)
    end_result = models.ForeignKey(DriveOutcome)
    end_time = models.TimeField()
    end_position = models.IntegerField(null=True)
    end_side = models.CharField(max_length=1, choices=SIDE_CHOICES)
    plays = models.IntegerField()
    yards = models.IntegerField()
    time_of_possession = models.TimeField()
    def __unicode__(self):
        return "%s: %s drive %s" % (self.game, self.team, self.drive)
class GamePlay(models.Model):
    """A single play in a game, optionally attached to its drive."""
    game = models.ForeignKey(Game)
    offensive_team = models.ForeignKey(CollegeYear)
    drive = models.ForeignKey(GameDrive, blank=True, null=True)
    quarter = models.PositiveSmallIntegerField()
    description = models.TextField()
    down = models.IntegerField()
    # Yards needed for a first down at the snap.
    distance = models.IntegerField()
    def __unicode__(self):
        return "%s: %s: %s" % (self.game, self.offensive_team, self.description)
class GameDriveSeason(models.Model):
    """Season-level aggregate: how many of a team's drives ended in an outcome."""
    season = models.IntegerField()
    team = models.ForeignKey(CollegeYear)
    outcome = models.ForeignKey(DriveOutcome)
    # Drives ending in this outcome / all drives; both nullable.
    total = models.IntegerField(null=True)
    drives_total = models.IntegerField(null=True)
    def __unicode__(self):
        return "%s: %s %s" % (self.season, self.team, self.outcome)
    def pct_of_total(self):
        """Percentage (0-100) of the team's drives that ended in this outcome.

        Robustness fix: both counts are nullable, so return 0.0 when either
        is missing or zero instead of raising TypeError/ZeroDivisionError.
        """
        if not self.total or not self.drives_total:
            return 0.0
        return float(self.total) / float(self.drives_total) * 100
class GameOffense(models.Model):
    """Single-game offensive and special-teams box-score totals for one team."""
    game = models.ForeignKey(Game)
    team = models.ForeignKey(CollegeYear)
    season = models.IntegerField()
    # Conversions and possession
    third_down_attempts = models.IntegerField(default=0)
    third_down_conversions = models.IntegerField(default=0)
    fourth_down_attempts = models.IntegerField(default=0)
    fourth_down_conversions = models.IntegerField(default=0)
    time_of_possession = models.TimeField(null=True)
    # First downs
    first_downs_rushing = models.IntegerField(default=0)
    first_downs_passing = models.IntegerField(default=0)
    first_downs_penalty = models.IntegerField(default=0)
    first_downs_total = models.IntegerField(default=0)
    # Penalties and turnovers
    penalties = models.IntegerField(default=0)
    penalty_yards = models.IntegerField(default=0)
    fumbles = models.IntegerField(default=0)
    fumbles_lost = models.IntegerField(default=0)
    # Rushing
    rushes = models.IntegerField(default=0)
    rush_gain = models.IntegerField(default=0)
    rush_loss = models.IntegerField(default=0)
    rush_net = models.IntegerField(default=0)
    rush_touchdowns = models.IntegerField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    # Passing / receiving
    pass_attempts = models.IntegerField(default=0)
    pass_completions = models.IntegerField(default=0)
    pass_interceptions = models.IntegerField(default=0)
    pass_yards = models.IntegerField(default=0)
    pass_touchdowns = models.IntegerField(default=0)
    receptions = models.IntegerField(default=0)
    receiving_yards = models.IntegerField(default=0)
    receiving_touchdowns = models.IntegerField(default=0)
    # Kicking game and returns
    punts = models.IntegerField(default=0)
    punt_yards = models.IntegerField(default=0)
    punt_returns = models.IntegerField(default=0)
    punt_return_yards = models.IntegerField(default=0)
    punt_return_touchdowns = models.IntegerField(default=0)
    kickoff_returns = models.IntegerField(default=0)
    kickoff_return_yards = models.IntegerField(default=0)
    kickoff_return_touchdowns = models.IntegerField(default=0)
    # Scoring
    touchdowns = models.IntegerField(default=0)
    pat_attempts = models.IntegerField(default=0)
    pat_made = models.IntegerField(default=0)
    two_point_conversion_attempts = models.IntegerField(default=0)
    two_point_conversions = models.IntegerField(default=0)
    field_goal_attempts = models.IntegerField(default=0)
    field_goals_made = models.IntegerField(default=0)
    points = models.IntegerField(default=0)
    def __unicode__(self):
        return '%s - %s' % (self.game, self.team)
    def third_down_rate(self):
        # Fraction converted; raises ZeroDivisionError with no attempts.
        return float(self.third_down_conversions)/float(self.third_down_attempts)
    def field_goal_rate(self):
        return float(self.field_goals_made)/float(self.field_goal_attempts)
    def penalty_yard_ratio(self):
        return float(self.penalty_yards)/float(self.total_yards)
    def yards_per_reception(self):
        return float(self.receiving_yards)/float(self.receptions)
    def yards_per_pass_attempt(self):
        # NOTE(review): uses receiving_yards rather than pass_yards. For a
        # team's own box score the two usually match, but confirm intent.
        return float(self.receiving_yards)/(self.pass_attempts)
    def rushing_first_downs_pct(self):
        # Percentage (0-100) of first downs gained on the ground.
        return float(self.first_downs_rushing)/float(self.first_downs_total)*100
    """
    Returns a floating-point number representing the number
    of touchdowns per rushing attempt for a single game.
    """
    def touchdowns_per_rushes(self):
        # NOTE: despite the string above, the *100 makes this a percentage.
        return float(self.rush_touchdowns)/float(self.rushes)*100
    """
    Returns the opponent for a team's given Game Offense record.
    """
    def opponent(self):
        if self.team == self.game.team2:
            return self.game.team1
        else:
            return self.game.team2
class GameDefense(models.Model):
    """Single-game defensive box-score totals for one team."""
    game = models.ForeignKey(Game)
    team = models.ForeignKey(CollegeYear)
    season = models.IntegerField()
    safeties = models.IntegerField(default=0)
    # Tackles
    unassisted_tackles = models.IntegerField(default=0)
    assisted_tackles = models.IntegerField(default=0)
    unassisted_tackles_for_loss = models.IntegerField(default=0)
    assisted_tackles_for_loss = models.IntegerField(default=0)
    tackles_for_loss_yards = models.IntegerField(default=0)
    # Sacks
    unassisted_sacks = models.IntegerField(default=0)
    assisted_sacks = models.IntegerField(default=0)
    sack_yards = models.IntegerField(default=0)
    # Pass defense
    defensive_interceptions = models.IntegerField(default=0)
    defensive_interception_yards = models.IntegerField(default=0)
    defensive_interception_touchdowns = models.IntegerField(default=0)
    pass_breakups = models.IntegerField(default=0)
    # Fumbles caused / recovered
    fumbles_forced = models.IntegerField(default=0)
    fumbles_number = models.IntegerField(default=0)
    fumbles_yards = models.IntegerField(default=0)
    fumbles_touchdowns = models.IntegerField(default=0)
    def __unicode__(self):
        return '%s - %s' % (self.game, self.team)
class Player(models.Model):
    """A rostered player for one team-season, identified by slug/number/position."""
    name = models.CharField(max_length=120)
    slug = models.SlugField(max_length=120)
    team = models.ForeignKey(CollegeYear)
    season = models.IntegerField()
    position = models.ForeignKey(Position)
    # Stored as text since jersey numbers may have non-numeric forms.
    number = models.CharField(max_length=4)
    games_played = models.PositiveIntegerField(default=0)
    # Class year (e.g. FR/SO/JR/SR) per STATUS_CHOICES.
    status = models.CharField(max_length=2, choices=STATUS_CHOICES)
    def __unicode__(self):
        return u"%s - %s" % (self.name, self.team)
    @models.permalink
    def get_absolute_url(self):
        # Resolved through the named view so URL layout lives in urls.py.
        return ('college.views.player_detail', (), {
            'team': self.team.college.slug,
            'season': self.season,
            'player': self.slug,
            'number': self.number,
            'position': self.position.abbrev
        })
    #return '/teams/%s/%s/players/%s/' % (self.team.college.slug, self.season, self.slug)
    def get_team_position_url(self):
        """URL listing this team-season's players at the same position."""
        return '/teams/%s/%s/players/positions/%s/' % (self.team.college.slug, self.season, self.position.abbrev.lower())
    def get_team_class_url(self):
        """URL listing this team-season's players in the same class year."""
        return '/teams/%s/%s/players/class/%s/' % (self.team.college.slug, self.season, self.status.lower())
    class Meta:
        ordering = ['id']
class PlayerCollegeCareer(models.Model):
    """Span of a player's college career: first and last team-seasons."""
    player = models.ForeignKey(Player)
    first_season = models.ForeignKey(CollegeYear, related_name='first_season')
    last_season = models.ForeignKey(CollegeYear, related_name='last_season')
    total_games = models.IntegerField(null=True, blank=True)
    def __unicode__(self):
        # Bug fix: Player.name is a CharField (a plain string), so the
        # original `self.player.name.full_name()` raised AttributeError.
        return self.player.name
class PlayerGame(models.Model):
    """Participation record for a player in one game (played/started + totals)."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    played = models.BooleanField()
    starter = models.BooleanField()
    total_plays = models.IntegerField()
    total_yards = models.IntegerField()
    def __unicode__(self):
        return self.player.name
class PlayerRush(models.Model):
    """Per-game rushing line for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    rushes = models.IntegerField(default=0)
    gain = models.IntegerField(default=0)
    loss = models.IntegerField(default=0)
    # net = gain - loss; stored, not computed.
    net = models.IntegerField(default=0)
    td = models.IntegerField(default=0)
    long_yards = models.IntegerField(default=0)
    average = models.FloatField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    class Meta:
        verbose_name_plural = "player rushing"
class PlayerPass(models.Model):
    """Per-game passing line for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    attempts = models.IntegerField(default=0)
    completions = models.IntegerField(default=0)
    interceptions = models.IntegerField(default=0)
    yards = models.IntegerField(default=0)
    td = models.IntegerField(default=0)
    conversions = models.IntegerField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    pass_efficiency = models.FloatField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    def comp_att(self):
        """Display string like "12 of 20" for completions/attempts."""
        return "%d of %d" % (self.completions, self.attempts)
    class Meta:
        verbose_name_plural = 'player passing'
class PlayerReceiving(models.Model):
    """Per-game receiving line for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    receptions = models.IntegerField(default=0)
    yards = models.IntegerField(default=0)
    td = models.IntegerField(default=0)
    long_yards = models.IntegerField(default=0)
    average = models.FloatField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerScoring(models.Model):
    """Per-game scoring line for a player (kicks, conversions, safeties)."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    td = models.IntegerField(default=0)
    fg_att = models.IntegerField(default=0)
    fg_made = models.IntegerField(default=0)
    pat_att = models.IntegerField(default=0)
    pat_made = models.IntegerField(default=0)
    two_pt_att = models.IntegerField(default=0)
    two_pt_made = models.IntegerField(default=0)
    # def_* fields count defensive conversion returns on PAT/2-pt tries.
    def_pat_att = models.IntegerField(default=0)
    def_pat_made = models.IntegerField(default=0)
    def_two_pt_att = models.IntegerField(default=0)
    def_two_pt_made = models.IntegerField(default=0)
    safeties = models.IntegerField(default=0)
    points = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerTackle(models.Model):
    """Per-game tackle counts for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    unassisted_tackles = models.IntegerField(default=0)
    assisted_tackles = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    def total_tackles(self):
        """Solo plus assisted tackles."""
        return self.unassisted_tackles+self.assisted_tackles
class PlayerTacklesLoss(models.Model):
    """Per-game tackles-for-loss and sack counts for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    unassisted_tackles_for_loss = models.IntegerField(default=0)
    assisted_tackles_for_loss = models.IntegerField(default=0)
    tackles_for_loss_yards = models.IntegerField(default=0)
    unassisted_sacks = models.IntegerField(default=0)
    assisted_sacks = models.IntegerField(default=0)
    sack_yards = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    def total_sacks(self):
        """Solo plus assisted sacks."""
        return self.unassisted_sacks+self.assisted_sacks
    def total_tackles_for_loss(self):
        """Solo plus assisted tackles for loss."""
        return self.unassisted_tackles_for_loss+self.assisted_tackles_for_loss
    class Meta:
        verbose_name_plural = 'player tackles for loss'
class PlayerPassDefense(models.Model):
    """Per-game pass-defense line (interceptions and breakups) for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    interceptions = models.IntegerField(default=0)
    interception_yards = models.IntegerField(default=0)
    interception_td = models.IntegerField(default=0)
    pass_breakups = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerFumble(models.Model):
    """Per-game fumble line (forced/recovered) for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    fumbles_forced = models.IntegerField(default=0)
    fumbles_number = models.IntegerField(default=0)
    fumbles_yards = models.IntegerField(default=0)
    fumbles_td = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerReturn(models.Model):
    """Per-game punt and kickoff return line for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    punt_returns = models.IntegerField(default=0)
    punt_return_yards = models.IntegerField(default=0)
    punt_return_td = models.IntegerField(default=0)
    kickoff_returns = models.IntegerField(default=0)
    kickoff_return_yards = models.IntegerField(default=0)
    kickoff_return_td = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerSummary(models.Model):
    """Season aggregate of a player's offensive stats (fields nullable)."""
    player = models.ForeignKey(Player)
    rushes = models.IntegerField(null=True)
    rush_gain = models.IntegerField(null=True)
    rush_loss = models.IntegerField(null=True)
    rush_net = models.IntegerField(null=True)
    rush_td = models.IntegerField(null=True)
    pass_attempts = models.IntegerField(null=True)
    pass_complete = models.IntegerField(null=True)
    pass_intercept = models.IntegerField(null=True)
    pass_yards = models.IntegerField(null=True)
    pass_td = models.IntegerField(null=True)
    conversions = models.IntegerField(null=True)
    offense_plays = models.IntegerField(null=True)
    offense_yards = models.IntegerField(null=True)
    receptions = models.IntegerField(null=True)
    reception_yards = models.IntegerField(null=True)
    reception_td = models.IntegerField(null=True)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.player.season)
class Poll(models.Model):
    """A ranking poll (e.g. AP, Coaches), identified by name and slug."""
    name = models.CharField(max_length=50)
    slug = models.SlugField(max_length=50)
    def __unicode__(self):
        return self.name
class PollResults(models.Model):
    """A team's rank in one poll for one week."""
    poll = models.ForeignKey(Poll)
    week = models.ForeignKey(Week)
    team = models.ForeignKey(College)
    rank = models.IntegerField()
    def __unicode__(self):
        return "%s: %s %s" % (self.poll, self.week, self.team)
| |
"""
Copyright 2014 Sotera Defense Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import threading
from Queue import Queue
from Queue import Empty
from impala.dbapi import connect
import tangelo
from entity import Entity
from datawake.conf import datawakeconfig
from datawake.util.dataconnector.data_connector import DataConnector
THREADS_PER_HOST = 2
class ImpalaQueryThread(threading.Thread):
    """Worker thread that drains a queue of work items against one Impala host.

    Each thread opens its own connection and cursor, then repeatedly calls
    do_work(cursor, item) until the queue is empty, finally closing the
    connection.
    """
    def __init__(self, host, port, q, do_work):
        threading.Thread.__init__(self)
        self.host = host
        self.port = port
        self.q = q
        self.do_work = do_work
    def run(self):
        cnx = connect(host=self.host, port=self.port)
        cursor = cnx.cursor()
        try:
            while True:
                # Non-blocking get: an empty queue raises Empty, ending
                # the loop instead of blocking forever.
                work_item = self.q.get(block=False)
                self.do_work(cursor, work_item)
        except Empty:
            pass
        finally:
            cnx.close()
class ClusterEntityDataConnector(DataConnector):
    """Impala-cluster connector for extracted entity data.

    Single-shot lookups use one lazily opened connection; bulk lookups fan
    work items out to ImpalaQueryThread workers (THREADS_PER_HOST per host),
    which merge rows into a shared accumulator under a lock.
    """

    def __init__(self, config):
        DataConnector.__init__(self)
        # config must supply 'hosts' (list of Impala hosts) and 'port'.
        self.config = config
        self.cnx = None
        self.lock = threading.Lock()

    def open(self):
        """Open a connection to a randomly chosen host from the pool."""
        host = random.choice(self.config['hosts'])
        self.cnx = connect(host=host, port=self.config['port'])

    def close(self):
        """Close and forget the current connection; close errors are ignored."""
        if self.cnx is not None:
            try:
                self.cnx.close()
            except:
                pass
            finally:
                self.cnx = None

    def _check_conn(self):
        """Lazily open a connection before a direct (non-threaded) query."""
        if self.cnx is None:
            self.open()

    def queue_impala_query(self, result_method, results, work_item_iterator):
        """Execute work items concurrently, folding rows into `results`.

        Args:
            result_method: callable(row, lock, results) merging one row.
            results: shared accumulator (list or dict), returned when done.
            work_item_iterator: generator yielding {'sql', 'params'} dicts.
        """
        q = Queue()
        for work_item in work_item_iterator():
            q.put(work_item)

        # Per-call lock shared by all workers while merging rows.
        lock = threading.Lock()

        def work_method(cursor, work_item):
            cursor.execute(work_item['sql'], work_item['params'])
            for row in cursor:
                result_method(row, lock, results)

        threads = []
        # Bug fix: copy the configured host list. The original aliased
        # self.config['hosts'] and then extend()ed it below, permanently
        # growing the configured list on every call.
        hosts = list(self.config['hosts'])
        max_threads = THREADS_PER_HOST * len(hosts)
        total_work = q.qsize()
        if total_work < len(hosts):
            # Fewer work items than hosts: one worker per item suffices.
            hosts = random.sample(hosts, total_work)
        else:
            # Add extra workers (re-using hosts) while there is enough
            # queued work and the per-host thread cap is not exceeded.
            while len(hosts) < max_threads and total_work > len(hosts):
                diff = total_work - len(hosts)
                # Bug fix: the original did min(diff, self.config['hosts']),
                # comparing an int against the host *list*; cap by the
                # number of hosts instead.
                diff = min(diff, len(self.config['hosts']))
                diff = min(diff, max_threads - len(hosts))
                hosts.extend(random.sample(self.config['hosts'], diff))
        for host in hosts:
            t = ImpalaQueryThread(host, self.config['port'], q, work_method)
            t.start()
            threads.append(t)
        # Wait for every worker to drain the queue.
        for thread in threads:
            thread.join()
        return results

    def get_extracted_entities_from_urls(self, urls, type=None):
        """Map url -> {attribute: [values]} from the general web index."""
        def work_item_iterator():
            sql = "select rowkey from general_extractor_web_index "
            for url in urls:
                work_item = {}
                # Rowkeys are NUL-delimited: url\0attribute\0value.
                rowkey = "%s\0" % url
                work_item['sql'] = sql + " where rowkey >= %(startkey)s and rowkey < %(endkey)s "
                work_item['params'] = {'startkey': rowkey, 'endkey': rowkey + "~"}
                yield work_item

        def append_to_list(row, lock, results):
            tokens = row[0].split("\0")
            url = tokens[0]
            attr = tokens[1]
            value = tokens[2]
            with lock:
                if url not in results:
                    results[url] = {}
                if attr not in results[url]:
                    results[url][attr] = [value]
                else:
                    results[url][attr].append(value)

        results = {}
        return self.queue_impala_query(append_to_list, results, work_item_iterator)

    def get_extracted_entities_with_domain_check(self, urls, types=[], domain='default'):
        # Mutable default is harmless here: `types` is only forwarded.
        return DataConnector.get_extracted_entities_with_domain_check(self, urls, types, domain)

    # # DOMAINS ####

    def get_domain_items(self, name, limit):
        """Fetch up to `limit` rowkeys for a domain from the values table."""
        self._check_conn()
        cursor = self.cnx.cursor()
        sql = "select rowkey from %(table)s where rowkey >= %(startkey)s and rowkey < %(endkey)s limit %(limit)s"
        params = {
            'startkey': name + '\0',
            'endkey': name + '\0~',
            'limit': limit,
            'table': datawakeconfig.DOMAIN_VALUES_TABLE
        }
        try:
            cursor.execute(sql, params)
            rows = cursor.fetchall()
        except:
            # Drop the (possibly broken) connection before propagating.
            self.close()
            raise
        return map(lambda x: x[0], rows)

    def get_domain_entity_matches(self, domain, type, values):
        """Return the subset of `values` present for (domain, type)."""
        def work_item_iterator():
            sql = "select rowkey from datawake_domain_entities "
            for value in values:
                work_item = {}
                rowkey = "%s\0%s\0%s" % (domain, type, value)
                work_item['sql'] = sql + " where rowkey >= %(startkey)s and rowkey < %(endkey)s "
                work_item['params'] = {'startkey': rowkey, 'endkey': rowkey + "~"}
                yield work_item

        def append_to_list(row, lock, results):
            tokens = row[0].split("\0")
            value = tokens[2]
            with lock:
                results.append(value)

        results = []
        return self.queue_impala_query(append_to_list, results, work_item_iterator)

    def get_extracted_domain_entities_from_urls(self, domain, urls, type=None):
        """Map url -> {type: [values]} for entities within one domain."""
        def work_item_iterator():
            sql = "select rowkey from datawake_domain_entities "
            for url in urls:
                work_item = {}
                # Rowkeys are NUL-delimited: domain\0url\0type\0value.
                rowkey = "%s\0%s\0" % (domain, url)
                work_item['sql'] = sql + " where rowkey >= %(startkey)s and rowkey < %(endkey)s "
                work_item['params'] = {'startkey': rowkey, 'endkey': rowkey + "~"}
                yield work_item

        def append_to_list(row, lock, results):
            tokens = row[0].split("\0")
            url = tokens[1]
            type = tokens[2]
            value = tokens[3]
            with lock:
                if url not in results:
                    results[url] = {}
                if type not in results[url]:
                    results[url][type] = [value]
                else:
                    results[url][type].append(value)

        results = {}
        return self.queue_impala_query(append_to_list, results, work_item_iterator)

    def get_extracted_domain_entities_for_urls(self, domain, urls):
        """Flat list of entity values for the given urls within one domain."""
        def work_item_iterator():
            sql = "select rowkey from datawake_domain_entities "
            for url in urls:
                work_item = {}
                rowkey = "%s\0%s\0" % (domain, url)
                work_item['sql'] = sql + " where rowkey >= %(startkey)s and rowkey < %(endkey)s "
                work_item['params'] = {'startkey': rowkey, 'endkey': rowkey + "~"}
                yield work_item

        def append_to_list(row, lock, results):
            tokens = row[0].split("\0")
            value = tokens[3]
            with lock:
                results.append(value)

        results = []
        return self.queue_impala_query(append_to_list, results, work_item_iterator)

    def get_extracted_entities_list_from_urls(self, urls):
        """Raw NUL-delimited rowkeys for the given urls."""
        def work_item_iterator():
            sql = "select rowkey from datawake_domain_entities "
            for url in urls:
                work_item = {}
                rowkey = "%s\0" % url
                work_item['sql'] = sql + " where rowkey >= %(startkey)s and rowkey < %(endkey)s "
                work_item['params'] = {'startkey': rowkey, 'endkey': rowkey + "~"}
                yield work_item

        def append_to_list(row, lock, results):
            with lock:
                results.append(row[0])

        results = []
        return self.queue_impala_query(append_to_list, results, work_item_iterator)

    def get_matching_entities_from_url(self, urls):
        """Return entity names shared by *every* one of the given urls."""
        # Bug fix: the original called self.get_matching_entities_from_url
        # here, recursing forever. Fetch the raw rowkey list instead.
        entities = self.get_extracted_entities_list_from_urls(urls)
        url_dict = dict()
        for url in urls:
            url_dict[url] = set()

        def new_entity(x):
            values = x.split("\0")
            if len(values) == 3:
                url_dict[values[0]].add(Entity(dict(type=values[1], name=values[2])))
            else:
                # Unexpected rowkey shape: log and skip.
                tangelo.log(",".join(values))

        map(lambda x: new_entity(x), entities)
        vals = url_dict.values()
        return map(lambda entity: entity.item["name"], set.intersection(*vals))

    # TODO: Might be able to remove this. No inserts or deletions through Impala
    def delete_domain_items(self, domain_name):
        return DataConnector.delete_domain_items(self, domain_name)

    def add_new_domain_items(self, domain_items):
        return DataConnector.add_new_domain_items(self, domain_items)
| |
""" ArnoldC -> Python translator
This file includes abstract model of blocks and statements.
"""
import reserved_words as rword
#Abstract syntax model
#---------------------
class Runnables(object):
    """Abstract definition of runnable blocks/statements.

    Subclasses must override both __init__ and get_parsed_structure();
    calling either on this base raises NotImplementedError.
    """
    def __init__(self):
        raise NotImplementedError
    def get_parsed_structure(self):
        # Must return this node rendered as Python source text.
        raise NotImplementedError
class Block(Runnables):
    """Shared plumbing for block nodes (Main, If, While): a child list.

    Children are other Runnables, appended in source order.
    """
    def __init__(self):
        self.child = []
    def add_child(self, child):
        """Attach a child node and hand it back so callers can descend into it."""
        self.child.append(child)
        return child
class Statement(Runnables):
    """Common definition of Statements (No longer needed?)

    Leaf-node base kept for symmetry with Block; subclasses store their own
    state.
    """
    def __init__(self):
        pass
#Concrete blocks/statements model
#--------------------------------
class Main(Block):
    """The ArnoldC main method: renders its children as top-level code."""
    def __init__(self):
        super().__init__()
    def get_parsed_structure(self):
        """Render every child; block children indent and newline themselves."""
        s = ""
        for i in self.child:
            # Bug fix: the original tested `type(i) in [type(If), type(While)]`,
            # i.e. against the metaclass `type`, which is never true for an
            # instance — block children were mis-rendered via the else branch.
            # Compare against the classes themselves, as If/While do.
            if type(i) in [type(If("")), type(While(""))]:
                # Main's children sit at module top level, so no extra
                # indentation; block nodes append their own newlines.
                s += i.get_parsed_structure(nest_lv=0)
            else:
                s += "".join([i.get_parsed_structure(), "\n"])
        # Collapse runs of blank lines. endswith() is also safe when the
        # output has fewer than two characters (the original indexed s[-2]).
        while s.endswith("\n\n"):
            s = s[:-1]
        return s
class If(Block):
    """If block.

    Children appended before the "else" marker belong to the if-branch;
    children after it belong to the else-branch. The marker is the literal
    string "else" stored in the child list.
    """
    def __init__(self, exp):
        super().__init__()
        # The raw condition expression; wrapped by GetEvalExpression on render.
        self.value = exp
    def add_else(self):
        # Sentinel marking where the else-branch starts.
        self.child.append("else")
    def has_else(self):
        return "else" in self.child
    def get_parsed_structure(self, nest_lv=0):
        """Render as Python source indented `nest_lv` levels."""
        s = "".join([" " * nest_lv, "if %s:\n" % GetEvalExpression(self.value)])
        for i in self.child:
            if i == "else":
                s += "".join([" " * nest_lv, "else:\n"])
            elif type(i) in [type(If("")), type(While(""))]:
                # Nested blocks indent themselves one level deeper.
                s += i.get_parsed_structure(nest_lv=nest_lv+1)
            else:
                s += "".join([" " * (nest_lv+1), i.get_parsed_structure(), "\n"])
        return s
class While(Block):
    """While block: renders `while <cond>:` with its children as the body."""
    def __init__(self, exp):
        super().__init__()
        # The raw condition expression; wrapped by GetEvalExpression on render.
        self.value = exp
    def get_parsed_structure(self, nest_lv=0):
        """Render as Python source indented `nest_lv` levels."""
        s = "".join([" " * nest_lv, "while %s:\n" % GetEvalExpression(self.value)])
        for i in self.child:
            if type(i) in [type(If("")), type(While(""))]:
                # Nested blocks indent themselves one level deeper.
                s += i.get_parsed_structure(nest_lv=nest_lv+1)
            else:
                s += "".join([" " * (nest_lv+1), i.get_parsed_structure(), "\n"])
        return s
class Print(Statement):
    """Renders an ArnoldC print statement as a Python print() call."""
    def __init__(self, string):
        # The argument text is emitted verbatim inside print(...).
        self.string = string
    def get_parsed_structure(self):
        return "print(%s)" % self.string
class DeclaringVariable(Statement):
    """Variable declaration rendered as a simple `name = value` assignment."""
    def __init__(self, name, value):
        self.name = name
        self.value = value
    def get_parsed_structure(self):
        return "%s = %s" % (self.name, str(self.value))
class Expression(Statement):
    """Expression recognizer class.

    Inherits Statement for uniformity but is not itself a statement; it is
    used by AssigningValue to build the right side of an equation as nested
    left-associative binary operations.
    """
    def __init__(self, args, operations):
        self.args = args
        # Prepend a no-op so the first argument is emitted without an
        # operator. Bug fix: build a new list instead of insert(0, ...),
        # which mutated the caller's list as a side effect.
        self.operations = [""] + list(operations)
    def get_parsed_structure(self):
        """Render e.g. args [a,b] with ops [+] as "((a)+b)"."""
        s = ""
        for (op, arg) in zip(self.operations, self.args):
            s = "".join(["(", s, op, arg, ")"])
        return s
class AssigningValue(Statement):
    """Value assign statement: `name = <expression>`.

    Delegates the right-hand side to the Expression class.
    """
    def __init__(self, name, args, operations):
        self.name = name
        self.exp = Expression(args, operations)
    def get_parsed_structure(self):
        rhs = self.exp.get_parsed_structure()
        return "%s = %s" % (self.name, rhs)
#Functions for syntax analysis
#-----------------------------
def GetOprAndArgs(l):
    """Extract the operation and its arguments from line.

    Matches the reserved operation whose words are all contained in the
    line, then treats the remaining trailing words as the argument.
    Returns (operation, argument); when no operation matches, returns the
    whitespace-normalized line and the sentinel "<NONE>".

    NOTE(review): when a line is exactly an operation with no argument,
    `lsp - isp` is empty and the `[:-0]` slice yields [], so opr stays ""
    and the whole line is returned as the op with arg "<NONE>" — Translate
    relies on that. Confirm before touching this slice.
    """
    r = rword.ReservedWords()
    lsp = set(l.split())
    opr = ""
    for i in r.word.values():
        isp = set(i.split())
        # Operation matches if all of its words occur in the line.
        if lsp & isp == isp:
            opr = " ".join( l.split()[:-len(lsp - isp)] )
    if opr == "":
        return " ".join(l.split()), "<NONE>"
    arg = " ".join( l.split()[len(opr.split()):] )
    return opr, arg
def GetEndOfBlock(code, end_op):
    """Get the last line of block.

    Returns the index of the first line containing `end_op`, or -1 when no
    line matches (including an empty `code`).
    """
    # Idiom fix: track the index with enumerate instead of the original
    # `code.index(i)`, which rescanned the list on every hit (O(n) lookup).
    for idx, line in enumerate(code):
        if end_op in line:
            return idx
    return -1
def GetArithmeticMembers(code, operator):
    """Get members and operators used in equation.

    Each line is split as "<operation words> <argument>"; lines whose
    operation appears in the `operator` mapping contribute the mapped
    operator symbol and the trailing argument.

    Returns (op_list, arg_list), index-aligned.
    """
    op_list = []
    arg_list = []
    for line in code:
        parts = line.split()
        if not parts:
            # Robustness fix: blank lines made the original parts[-1]
            # raise IndexError; skip them instead.
            continue
        op = " ".join(parts[:-1])
        arg = parts[-1]
        if op in operator.keys():
            arg_list.append(arg)
            op_list.append(operator[op])
    return op_list, arg_list
def ReplaceMacros(code):
    """Replace the ArnoldC macro words for 1 and 0 with numeric literals."""
    words = rword.ReservedWords().word
    return code.replace(words["1"], "1").replace(words["0"], "0")
def GetEvalExpression(value):
    """Build the Python truth-test source for an ArnoldC condition value.

    The generated expression uses a boolean value as-is and compares any
    other value with `> 0`, matching ArnoldC's evaluation rule.
    """
    return "({0} if type({0}) == type(bool()) else {0} > 0)".format(value)
#Main translator function
#------------------------
def Translate(inp, debug=False):
    """Translate the ArnoldC code in Python.

    Reads all lines from the file-like `inp`, builds the Block/Statement
    tree rooted at Main, and returns the rendered Python source when the
    Main-end keyword is reached. Raises rword.WhatTheFuckDidIDoWrong with
    the 1-based line number on malformed input or unexpected EOF.
    """
    code = [ReplaceMacros(x) for x in inp.readlines()]
    w = rword.ReservedWords()
    tree = None
    # Stack of enclosing blocks; `ptr` is the block currently being filled.
    stack = [None]
    ptr = None
    pc = 0
    WTFException = rword.WhatTheFuckDidIDoWrong
    while True:
        # Fetch the current line; running off the end means the Main-end
        # keyword never appeared.
        try:
            l = code[pc]
        except IndexError:
            raise WTFException(pc+1, "unexpected EOF")
        else:
            if l[-1] == "\n":
                l = l[:-1]
            op, arg = GetOprAndArgs(l)
        # Peek at the next line (two-line constructs such as variable
        # declaration/assignment need its op/arg too).
        # NOTE(review): on the very last line the except branch passes, so
        # l_/op_/arg_ keep their previous-iteration values (or are unbound
        # on a one-line program) — confirm this is acceptable.
        try:
            l_ = code[pc+1]
        except IndexError:
            pass
        else:
            if l_[-1] == "\n":
                l_ = l_[:-1]
            op_, arg_ = GetOprAndArgs(l_)
        if debug:
            print("l:", l)
            print("op:", op)
            print("arg:", arg)
            print("")
            print("l_:", l_)
            print("op_:", op_)
            print("arg_:", arg_)
            print("\n")
        if w.word["Main"] == op:
            if ptr == None:
                tree = Main()
                ptr = tree
            else:
                raise WTFException(pc+1, "attempted to begin Main method in another method")
        elif w.word["Main_end"] == op:
            if type(ptr) == type(Main()):
                # End of Main: render the whole tree and finish.
                out = ptr.get_parsed_structure()
                if debug:
                    print(out)
                return out
            else:
                raise WTFException(pc+1, "unexpected end of Main: " + str(type(ptr)))
        elif w.word["If"] == op:
            # Descend into a new If block.
            stack.append(ptr)
            ptr = ptr.add_child(If(arg))
        elif w.word["Else"] == op:
            if type(ptr) == type(If("")):
                if ptr.has_else() == False:
                    ptr.add_else()
                else:
                    raise WTFException(pc+1, "there is already Else before this")
            else:
                raise WTFException(pc+1, "there is no If before Else:")
        elif w.word["While"] == op:
            # Descend into a new While block.
            stack.append(ptr)
            ptr = ptr.add_child(While(arg))
        elif op in [w.word["If_end"], w.word["While_end"]]:
            # Pop back to the enclosing block.
            ptr = stack.pop()
        elif w.word["Print"] == op:
            ptr.add_child(Print(arg))
        elif (w.word["DecVar"] == op) & (w.word["DecVar_value"] == op_):
            # Two-line declaration: name on this line, value on the next.
            ptr.add_child(DeclaringVariable(arg, arg_))
            pc += 1
        elif (w.word["AssignVar"] == op) & (w.word["AssignVar_opr"] == op_):
            # Multi-line assignment: consume lines up to the end keyword
            # and turn the operator/argument pairs into an expression.
            pc += 1
            offset = GetEndOfBlock(code[pc:], w.word["AssignVar_end"])
            b = code[pc:pc + offset]
            op_list, arg_list = GetArithmeticMembers(b, w.operator)
            ptr.add_child(AssigningValue(arg, [arg_] + arg_list, op_list))
            pc += offset
        elif op == "":
            # Blank line: nothing to do.
            pass
        else:
            raise WTFException(pc+1, "unknown: \"%s\"" % op)
        pc += 1
| |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Docker component launcher which launches a container in docker environment ."""
import collections
from typing import Any, Dict, List, Optional, cast
from absl import logging
from kubernetes import client
from tfx.dsl.compiler import placeholder_utils
from tfx.dsl.component.experimental import executor_specs
from tfx.orchestration.launcher import container_common
from tfx.orchestration.portable import base_executor_operator
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.utils import kube_utils
from google.protobuf import message
class KubernetesExecutorOperator(base_executor_operator.BaseExecutorOperator):
"""Responsible for launching a container executor on Kubernetes."""
SUPPORTED_EXECUTOR_SPEC_TYPE = [executable_spec_pb2.ContainerExecutableSpec]
SUPPORTED_PLATFORM_CONFIG_TYPE = []
  def __init__(self,
               executor_spec: message.Message,
               platform_config: Optional[message.Message] = None):
    """Initializes the operator.

    Args:
      executor_spec: Expected to be a ContainerExecutableSpec proto; it is
        cast and stored for building the pod's command/args.
      platform_config: Optional platform-specific config passed to the base.
    """
    super().__init__(executor_spec, platform_config)
    self._container_executor_spec = cast(
        executable_spec_pb2.ContainerExecutableSpec, self._executor_spec)
def run_executor(
self, execution_info: data_types.ExecutionInfo
) -> execution_result_pb2.ExecutorOutput:
"""Execute underlying component implementation.
Runs executor container in a Kubernetes Pod and wait until it goes into
`Succeeded` or `Failed` state.
Args:
execution_info: All the information that the launcher provides.
Raises:
RuntimeError: when the pod is in `Failed` state or unexpected failure from
Kubernetes API.
Returns:
An ExecutorOutput instance
"""
context = placeholder_utils.ResolutionContext(
exec_info=execution_info,
executor_spec=self._executor_spec,
platform_config=self._platform_config)
container_spec = executor_specs.TemplatedExecutorContainerSpec(
image=self._container_executor_spec.image,
command=[
placeholder_utils.resolve_placeholder_expression(cmd, context)
for cmd in self._container_executor_spec.commands
] or None,
args=[
placeholder_utils.resolve_placeholder_expression(arg, context)
for arg in self._container_executor_spec.args
] or None,
)
pod_name = self._build_pod_name(execution_info)
# TODO(hongyes): replace the default value from component config.
try:
namespace = kube_utils.get_kfp_namespace()
except RuntimeError:
namespace = 'kubeflow'
pod_manifest = self._build_pod_manifest(pod_name, container_spec)
core_api = kube_utils.make_core_v1_api()
if kube_utils.is_inside_kfp():
launcher_pod = kube_utils.get_current_kfp_pod(core_api)
pod_manifest['spec']['serviceAccount'] = launcher_pod.spec.service_account
pod_manifest['spec'][
'serviceAccountName'] = launcher_pod.spec.service_account_name
pod_manifest['metadata'][
'ownerReferences'] = container_common.to_swagger_dict(
launcher_pod.metadata.owner_references)
else:
pod_manifest['spec']['serviceAccount'] = kube_utils.TFX_SERVICE_ACCOUNT
pod_manifest['spec'][
'serviceAccountName'] = kube_utils.TFX_SERVICE_ACCOUNT
logging.info('Looking for pod "%s:%s".', namespace, pod_name)
resp = kube_utils.get_pod(core_api, pod_name, namespace)
if not resp:
logging.info('Pod "%s:%s" does not exist. Creating it...',
namespace, pod_name)
logging.info('Pod manifest: %s', pod_manifest)
try:
resp = core_api.create_namespaced_pod(
namespace=namespace, body=pod_manifest)
except client.rest.ApiException as e:
raise RuntimeError(
'Failed to created container executor pod!\nReason: %s\nBody: %s' %
(e.reason, e.body))
# Wait up to 300 seconds for the pod to move from pending to another status.
logging.info('Waiting for pod "%s:%s" to start.', namespace, pod_name)
kube_utils.wait_pod(
core_api,
pod_name,
namespace,
exit_condition_lambda=kube_utils.pod_is_not_pending,
condition_description='non-pending status',
timeout_sec=300)
logging.info('Start log streaming for pod "%s:%s".', namespace, pod_name)
try:
logs = core_api.read_namespaced_pod_log(
name=pod_name,
namespace=namespace,
container=kube_utils.ARGO_MAIN_CONTAINER_NAME,
follow=True,
_preload_content=False).stream()
except client.rest.ApiException as e:
raise RuntimeError(
'Failed to stream the logs from the pod!\nReason: %s\nBody: %s' %
(e.reason, e.body))
for log in logs:
logging.info(log.decode().rstrip('\n'))
# Wait indefinitely for the pod to complete.
resp = kube_utils.wait_pod(
core_api,
pod_name,
namespace,
exit_condition_lambda=kube_utils.pod_is_done,
condition_description='done state')
if resp.status.phase == kube_utils.PodPhase.FAILED.value:
raise RuntimeError('Pod "%s:%s" failed with status "%s".' %
(namespace, pod_name, resp.status))
logging.info('Pod "%s:%s" is done.', namespace, pod_name)
return execution_result_pb2.ExecutorOutput()
def _build_pod_manifest(
self, pod_name: str,
container_spec: executor_specs.TemplatedExecutorContainerSpec
) -> Dict[str, Any]:
"""Build a pod spec.
The function builds a pod spec by patching executor container spec into
the pod spec from component config.
Args:
pod_name: The name of the pod.
container_spec: The resolved executor container spec.
Returns:
The pod manifest in dictionary format.
"""
pod_manifest = collections.defaultdict(dict)
pod_manifest.update({
'apiVersion': 'v1',
'kind': 'Pod',
})
# TODO(hongyes): figure out a better way to figure out type hints for nested
# dict.
metadata = pod_manifest['metadata']
metadata.update({'name': pod_name})
spec = pod_manifest['spec']
spec.update({'restartPolicy': 'Never'})
containers = spec.setdefault('containers', []) # type: List[Dict[str, Any]]
container = None # type: Optional[Dict[str, Any]]
for c in containers:
if c['name'] == kube_utils.ARGO_MAIN_CONTAINER_NAME:
container = c
break
if not container:
container = {'name': kube_utils.ARGO_MAIN_CONTAINER_NAME}
containers.append(container)
container.update({
'image': container_spec.image,
'command': container_spec.command,
'args': container_spec.args,
})
return pod_manifest
def _build_pod_name(self, execution_info: data_types.ExecutionInfo) -> str:
pipeline_name = (
execution_info.pipeline_info.id[:50] + '-' +
execution_info.pipeline_run_id[:50])
pod_name = '%s-%s-%s' % (pipeline_name,
execution_info.pipeline_node.node_info.id[:50],
execution_info.execution_id)
return kube_utils.sanitize_pod_name(pod_name)
| |
import warnings
from os.path import dirname, join
import numpy as np
import pandas as pd
from nose.tools import assert_almost_equal, assert_equal
from numpy.random import RandomState
from numpy.testing import assert_array_equal
from pandas.testing import assert_series_equal
from rsmtool.analyzer import Analyzer
class TestAnalyzer:
    """Unit tests for the static helper methods on rsmtool's Analyzer class
    (correlation helpers, evaluation-metric helpers, PCA, and disattenuated
    correlations)."""
    def setUp(self):
        """Build shared fixtures: seeded random feature frames and score series."""
        self.prng = RandomState(133)
        self.df_features = pd.DataFrame({'sc1': [1, 2, 3, 4, 1, 2, 3, 4, 1, 2],
                                         'f1': self.prng.normal(0, 1, 10),
                                         'f2': self.prng.normal(1, 0.1, 10),
                                         'f3': self.prng.normal(2, 0.1, 10),
                                         'group': ['group1'] * 10},
                                        index=range(0, 10))
        self.df_features_same_score = self.df_features.copy()
        self.df_features_same_score['sc1'] = [3] * 10
        self.df_features_with_groups = self.df_features.copy()
        self.df_features_with_groups['group'] = ['group1']*5 + ['group2']*5
        self.df_features_with_groups_and_length = self.df_features_with_groups.copy()
        self.df_features_with_groups_and_length['length'] = self.prng.normal(50, 250, 10)
        self.human_scores = pd.Series(self.prng.randint(1, 5, size=10))
        self.system_scores = pd.Series(self.prng.random_sample(10) * 5)
        self.same_human_scores = pd.Series([3] * 10)
        # get the directory containing the tests
        self.test_dir = dirname(__file__)
    def test_correlation_helper(self):
        # test that there are no nans for data frame with 10 values
        retval = Analyzer.correlation_helper(self.df_features, 'sc1', 'group')
        assert_equal(retval[0].isnull().values.sum(), 0)
        assert_equal(retval[1].isnull().values.sum(), 0)
    def test_correlation_helper_for_data_with_one_row(self):
        # this should return two data frames with nans
        retval = Analyzer.correlation_helper(self.df_features[:1], 'sc1', 'group')
        assert_equal(retval[0].isnull().values.sum(), 3)
        assert_equal(retval[1].isnull().values.sum(), 3)
    def test_correlation_helper_for_data_with_two_rows(self):
        # this should return 1/-1 for marginal correlations and nans for
        # partial correlations
        retval = Analyzer.correlation_helper(self.df_features[:2], 'sc1', 'group')
        assert_equal(abs(retval[0].values).sum(), 3)
        assert_equal(retval[1].isnull().values.sum(), 3)
    def test_correlation_helper_for_data_with_three_rows(self):
        # this should compute marginal correlations but return Nans for
        # partial correlations
        retval = Analyzer.correlation_helper(self.df_features[:3], 'sc1', 'group')
        assert_equal(retval[0].isnull().values.sum(), 0)
        assert_equal(retval[1].isnull().values.sum(), 3)
    def test_correlation_helper_for_data_with_four_rows(self):
        # this should compute marginal correlations and return a unity
        # matrix for partial correlations
        # it should also raise a UserWarning
        with warnings.catch_warnings(record=True) as warning_list:
            retval = Analyzer.correlation_helper(self.df_features[:4], 'sc1', 'group')
        assert_equal(retval[0].isnull().values.sum(), 0)
        assert_almost_equal(np.abs(retval[1].values).sum(), 0.9244288637889855)
        assert issubclass(warning_list[-1].category, UserWarning)
    def test_correlation_helper_for_data_with_groups(self):
        """One result row per group when a real grouping column is used."""
        retval = Analyzer.correlation_helper(self.df_features_with_groups, 'sc1', 'group')
        assert_equal(len(retval[0]), 2)
        assert_equal(len(retval[1]), 2)
    def test_correlation_helper_for_one_group_with_one_row(self):
        # this should return a data frames with nans for group with 1 row
        retval = Analyzer.correlation_helper(self.df_features_with_groups[:6], 'sc1', 'group')
        assert_equal(len(retval[0]), 2)
        assert_equal(len(retval[1]), 2)
        assert_equal(retval[0].isnull().values.sum(), 3)
    def test_correlation_helper_for_groups_and_length(self):
        """include_length=True adds a length column to each result frame."""
        retval = Analyzer.correlation_helper(self.df_features_with_groups_and_length,
                                             'sc1', 'group', include_length=True)
        for df in retval:
            assert_equal(len(df), 2)
            assert_equal(len(df.columns), 3)
    def test_correlation_helper_for_group_with_one_row_and_length(self):
        # this should return a data frames with nans for group with 1 row
        retval = Analyzer.correlation_helper(self.df_features_with_groups_and_length[:6],
                                             'sc1', 'group', include_length=True)
        for df in retval:
            assert_equal(len(df), 2)
            assert_equal(len(df.columns), 3)
    def test_that_correlation_helper_works_for_data_with_the_same_human_score(self):
        # this test should raise UserWarning because the determinant is very close to
        # zero. It also raises Runtime warning because
        # variance of human scores is 0.
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.filterwarnings('ignore', category=RuntimeWarning)
            retval = Analyzer.correlation_helper(self.df_features_same_score, 'sc1', 'group')
        assert_equal(retval[0].isnull().values.sum(), 3)
        assert_equal(retval[1].isnull().values.sum(), 3)
        assert issubclass(warning_list[-1].category, UserWarning)
    def test_that_metrics_helper_works_for_data_with_one_row(self):
        # There should be NaNs for SMD, correlations and both sds
        # note that we will get a value for QWK since we are
        # dividing by N and not N-1
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=RuntimeWarning)
            evals = Analyzer.metrics_helper(self.human_scores[0:1],
                                            self.system_scores[0:1])
        assert_equal(evals.isnull().values.sum(), 5)
    def test_that_metrics_helper_works_for_data_with_the_same_label(self):
        # There should be NaNs for correlation and SMD.
        # Note that for a dataset with a single response
        # kappas will be 0 or 1
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=RuntimeWarning)
            evals = Analyzer.metrics_helper(self.same_human_scores,
                                            self.system_scores)
        assert_equal(evals.isnull().values.sum(), 2)
    def test_metrics_helper_population_sds(self):
        """Passing population SDs should change only the SMD value."""
        df_new_features = pd.read_csv(join(self.test_dir, 'data', 'files', 'train.csv'))
        # compute the metrics when not specifying the population SDs
        computed_metrics1 = Analyzer.metrics_helper(df_new_features['score'],
                                                    df_new_features['score2'])
        expected_metrics1 = pd.Series({'N': 500.0,
                                       'R2': 0.65340566606389394,
                                       'RMSE': 0.47958315233127197,
                                       'SMD': 0.03679030063229779,
                                       'adj_agr': 100.0,
                                       'corr': 0.82789026370069529,
                                       'exact_agr': 77.0,
                                       'h_max': 6.0,
                                       'h_mean': 3.4199999999999999,
                                       'h_min': 1.0,
                                       'h_sd': 0.81543231461565147,
                                       'kappa': 0.6273493195074531,
                                       'sys_max': 6.0,
                                       'sys_mean': 3.4500000000000002,
                                       'sys_min': 1.0,
                                       'sys_sd': 0.81782496620652367,
                                       'wtkappa': 0.8273273273273274})
        # and now compute them specifying the population SDs
        computed_metrics2 = Analyzer.metrics_helper(df_new_features['score'],
                                                    df_new_features['score2'],
                                                    population_human_score_sd=0.5,
                                                    population_system_score_sd=0.4,
                                                    smd_method='williamson')
        # the only number that should change is the SMD
        expected_metrics2 = expected_metrics1.copy()
        expected_metrics2['SMD'] = 0.066259
        assert_series_equal(computed_metrics1.sort_index(), expected_metrics1.sort_index())
        assert_series_equal(computed_metrics2.sort_index(), expected_metrics2.sort_index())
    def test_metrics_helper_zero_system_sd(self):
        """A constant system score yields NaN correlation; DSM is NaN too."""
        human_scores = [1, 3, 4, 2, 3, 1, 3, 4, 2, 1]
        system_score = [2.54] * 10
        computed_metrics1 = Analyzer.metrics_helper(human_scores,
                                                    system_score)
        expected_metrics1 = pd.Series({'N': 10,
                                       'R2': -0.015806451612903283,
                                       'RMSE': 1.122319027727856,
                                       'SMD': 0.11927198519188371,
                                       'adj_agr': 50.0,
                                       'corr': None,
                                       'exact_agr': 0,
                                       'h_max': 4,
                                       'h_mean': 2.4,
                                       'h_min': 1.0,
                                       'h_sd': 1.1737877907772674,
                                       'kappa': 0,
                                       'sys_max': 2.54,
                                       'sys_mean': 2.54,
                                       'sys_min': 2.54,
                                       'sys_sd': 0,
                                       'wtkappa': 0})
        # now compute DSM
        computed_metrics2 = Analyzer.metrics_helper(human_scores,
                                                    system_score,
                                                    use_diff_std_means=True)
        # the only number that should change is the SMD
        expected_metrics2 = expected_metrics1.copy()
        expected_metrics2.drop("SMD", inplace=True)
        expected_metrics2['DSM'] = None
        assert_series_equal(computed_metrics1.sort_index(),
                            expected_metrics1.sort_index(),
                            check_dtype=False)
        assert_series_equal(computed_metrics2.sort_index(),
                            expected_metrics2.sort_index(),
                            check_dtype=False)
    def test_compute_pca_less_samples_than_features(self):
        # test pca when we have less samples than
        # features. In this case the number of components
        # equals to the number of samples.
        dfs = []
        # to avoid inserting too many columns,
        # we create a list of data frames and then
        # concatenate them together
        for i in range(1, 101):
            dfs.append(pd.DataFrame({i: pd.Series(range(50)) * i}))
        df = pd.concat(dfs, axis=1)
        (components, variance) = Analyzer.compute_pca(df, df.columns)
        assert_equal(len(components.columns), 50)
        assert_equal(len(variance.columns), 50)
    def test_compute_disattenuated_correlations_single_human(self):
        """A single h-h value should be broadcast across all h-m rows."""
        hm_corr = pd.Series([0.9, 0.8, 0.6],
                            index=['raw', 'raw_trim', 'raw_trim_round'])
        hh_corr = pd.Series([0.81], index=[''])
        df_dis_corr = Analyzer.compute_disattenuated_correlations(hm_corr,
                                                                  hh_corr)
        assert_equal(len(df_dis_corr), 3)
        assert_equal(df_dis_corr.loc['raw', 'corr_disattenuated'], 1.0)
    def test_compute_disattenuated_correlations_matching_human(self):
        """Matching indices: disattenuated corr is hm / sqrt(hh) per group."""
        hm_corr = pd.Series([0.9, 0.4, 0.6],
                            index=['All data', 'GROUP1', 'GROUP2'])
        hh_corr = pd.Series([0.81, 0.64, 0.36],
                            index=['All data', 'GROUP1', 'GROUP2'])
        df_dis_corr = Analyzer.compute_disattenuated_correlations(hm_corr,
                                                                  hh_corr)
        assert_equal(len(df_dis_corr), 3)
        assert_array_equal(df_dis_corr['corr_disattenuated'], [1.0, 0.5, 1.0])
    def test_compute_disattenuated_correlations_single_matching_human(self):
        """Only the index shared with hh_corr gets a value; others are NaN."""
        hm_corr = pd.Series([0.9, 0.4, 0.6],
                            index=['All data', 'GROUP1', 'GROUP2'])
        hh_corr = pd.Series([0.81],
                            index=['All data'])
        df_dis_corr = Analyzer.compute_disattenuated_correlations(hm_corr,
                                                                  hh_corr)
        assert_equal(len(df_dis_corr), 3)
        assert_array_equal(df_dis_corr['corr_disattenuated'], [1.0, np.nan, np.nan])
    def test_compute_disattenuated_correlations_mismatched_indices(self):
        """Indices present in only one series produce NaN rows."""
        hm_corr = pd.Series([0.9, 0.6],
                            index=['All data', 'GROUP2'])
        hh_corr = pd.Series([0.81, 0.64],
                            index=['All data', 'GROUP1'])
        df_dis_corr = Analyzer.compute_disattenuated_correlations(hm_corr,
                                                                  hh_corr)
        assert_equal(len(df_dis_corr), 3)
        assert_array_equal(df_dis_corr['corr_disattenuated'], [1.0, np.nan, np.nan])
    def test_compute_disattenuated_correlations_negative_human(self):
        """A negative h-h correlation cannot be disattenuated -> NaN."""
        hm_corr = pd.Series([0.9, 0.8],
                            index=['All data', 'GROUP1'])
        hh_corr = pd.Series([-0.03, 0.64],
                            index=['All data', 'GROUP1'])
        df_dis_corr = Analyzer.compute_disattenuated_correlations(hm_corr,
                                                                  hh_corr)
        assert_equal(len(df_dis_corr), 2)
        assert_array_equal(df_dis_corr['corr_disattenuated'], [np.nan, 1.0])
| |
from __future__ import unicode_literals
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper(object):
    """Wrap a DB-API cursor so that database errors are translated through
    ``self.db.wrap_database_errors`` and broken transactions are detected
    before each statement runs."""
    def __init__(self, cursor, db):
        self.cursor = cursor  # the underlying DB-API cursor
        self.db = db  # the database connection wrapper that created us
    # Cursor attributes whose returned callables must be wrapped so that
    # backend exceptions raised during fetching are translated.
    WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
    def __getattr__(self, attr):
        # Delegate everything else to the raw cursor; fetch-style methods get
        # the error-translation wrapper applied on access.
        cursor_attr = getattr(self.cursor, attr)
        if attr in CursorWrapper.WRAP_ERROR_ATTRS:
            return self.db.wrap_database_errors(cursor_attr)
        else:
            return cursor_attr
    def __iter__(self):
        # Iterate the raw cursor with error translation active.
        with self.db.wrap_database_errors:
            for item in self.cursor:
                yield item
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Close instead of passing through to avoid backend-specific behavior
        # (#17671). Catch errors liberally because errors in cleanup code
        # aren't useful.
        try:
            self.close()
        except self.db.Database.Error:
            pass
    # The following methods cannot be implemented in __getattr__, because the
    # code must run when the method is invoked, not just when it is accessed.
    def callproc(self, procname, params=None):
        """Call a stored procedure, with transaction validation and error
        translation."""
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            if params is None:
                return self.cursor.callproc(procname)
            else:
                return self.cursor.callproc(procname, params)
    def execute(self, sql, params=None):
        """Execute a single SQL statement, with transaction validation and
        error translation."""
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            if params is None:
                return self.cursor.execute(sql)
            else:
                return self.cursor.execute(sql, params)
    def executemany(self, sql, param_list):
        """Execute *sql* once for every parameter set in *param_list*."""
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
    """CursorWrapper variant that also records each query's SQL and duration
    in ``self.db.queries_log`` and emits a debug log line."""
    # XXX callproc isn't instrumented at this time.
    def execute(self, sql, params=None):
        """Execute a statement, timing it even when it raises."""
        start = time()
        try:
            return super(CursorDebugWrapper, self).execute(sql, params)
        finally:
            # Logging runs in `finally` so failed queries are recorded too.
            stop = time()
            duration = stop - start
            sql = self.db.ops.last_executed_query(self.cursor, sql, params)
            self.db.queries_log.append({
                'sql': sql,
                'time': "%.3f" % duration,
            })
            logger.debug(
                '(%.3f) %s; args=%s', duration, sql, params,
                extra={'duration': duration, 'sql': sql, 'params': params}
            )
    def executemany(self, sql, param_list):
        """Execute a statement for each parameter set, timing the batch."""
        start = time()
        try:
            return super(CursorDebugWrapper, self).executemany(sql, param_list)
        finally:
            stop = time()
            duration = stop - start
            try:
                times = len(param_list)
            except TypeError:           # param_list could be an iterator
                times = '?'
            self.db.queries_log.append({
                'sql': '%s times: %s' % (times, sql),
                'time': "%.3f" % duration,
            })
            logger.debug(
                '(%.3f) %s; args=%s', duration, sql, param_list,
                extra={'duration': duration, 'sql': sql, 'params': param_list}
            )
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
    """Convert an ISO 'YYYY-MM-DD' string to a datetime.date.

    Returns None for a null/empty value.
    """
    if not s:
        return None
    year, month, day = s.split('-')
    return datetime.date(int(year), int(month), int(day))
def typecast_time(s):  # does NOT store time zone information
    """Convert an 'HH:MM:SS[.ffffff]' string to a datetime.time.

    Returns None for a null/empty value; the fractional part, when present,
    is right-padded/truncated to six digits (microseconds).
    """
    if not s:
        return None
    hour, minutes, seconds = s.split(':')
    seconds, _, fraction = seconds.partition('.')
    if not fraction:
        fraction = '0'
    return datetime.time(int(hour), int(minutes), int(seconds),
                         int((fraction + '000000')[:6]))
def typecast_timestamp(s):  # does NOT store time zone information
    """Convert a 'YYYY-MM-DD HH:MM:SS[.ffffff][+/-TZ]' string to a datetime.

    Examples of accepted input:
      "2005-07-29 15:48:00.590358-05"
      "2005-07-29 09:56:00-05"

    Returns None for a null/empty value; a date-only string is delegated to
    typecast_date(). The timezone suffix, if any, is currently discarded.
    """
    if not s:
        return None
    if ' ' not in s:
        return typecast_date(s)
    day_part, time_part = s.split()
    # Extract timezone information, if it exists. Currently we just throw
    # it away, but in the future we may make use of it.
    for sign in ('-', '+'):
        if sign in time_part:
            time_part, _tz = time_part.split(sign, 1)
            break
    dates = day_part.split('-')
    times = time_part.split(':')
    seconds, _, fraction = times[2].partition('.')
    if not fraction:
        fraction = '0'
    tzinfo = utc if settings.USE_TZ else None
    return datetime.datetime(
        int(dates[0]), int(dates[1]), int(dates[2]),
        int(times[0]), int(times[1]), int(seconds),
        int((fraction + '000000')[:6]), tzinfo
    )
def typecast_decimal(s):
    """Convert a database string to a decimal.Decimal.

    Both None and the empty string map to None.
    """
    return None if s is None or s == '' else decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
    """Convert a Decimal to its string form for the database (None passes
    through unchanged)."""
    return None if d is None else str(d)
def split_identifier(identifier):
    """
    Split a SQL identifier into a two element tuple of (namespace, name).

    The identifier could be a table, column, or sequence name might be prefixed
    by a namespace.
    """
    parts = identifier.split('"."')
    if len(parts) == 2:
        namespace, name = parts
    else:
        # No (or an ambiguous) namespace separator: treat the whole thing as
        # the name with an empty namespace.
        namespace, name = '', identifier
    return namespace.strip('"'), name.strip('"')
def truncate_name(identifier, length=None, hash_len=4):
    """
    Shorten a SQL identifier to a repeatable mangled version with the given
    length.

    If a quote stripped name contains a namespace, e.g. USERNAME"."TABLE,
    truncate the table portion only.
    """
    namespace, name = split_identifier(identifier)
    if length is None or len(name) <= length:
        # Nothing to shorten: hand back the identifier untouched.
        return identifier
    # Repeatable suffix derived from the full name, so the same input always
    # maps to the same truncated identifier.
    digest = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
    pieces = []
    if namespace:
        pieces.append('%s"."' % namespace)
    pieces.append(name[:length - hash_len])
    pieces.append(digest)
    return ''.join(pieces)
def format_number(value, max_digits, decimal_places):
    """
    Formats a number into a string with the requisite number of digits and
    decimal places.
    """
    if value is None:
        return None
    if isinstance(value, decimal.Decimal):
        context = decimal.getcontext().copy()
        if max_digits is not None:
            context.prec = max_digits
        if decimal_places is None:
            # No fixed scale: trap Rounded so that exceeding the precision
            # raises instead of silently dropping digits.
            context.traps[decimal.Rounded] = 1
            value = context.create_decimal(value)
        else:
            value = value.quantize(
                decimal.Decimal(".1") ** decimal_places, context=context)
        return "{:f}".format(value)
    if decimal_places is None:
        return "{:f}".format(value)
    return "%.*f" % (decimal_places, value)
def strip_quotes(table_name):
    """
    Strip quotes off of quoted table names to make them safe for use in index
    names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
    scheme) becomes 'USER"."TABLE'.
    """
    if table_name.startswith('"') and table_name.endswith('"'):
        return table_name[1:-1]
    return table_name
| |
import hashlib
import os
import pytest
from funcy import first
from dvc.exceptions import DvcException
from dvc.utils.fs import remove
def digest(text):
    """Return the hex MD5 digest of *text* encoded as UTF-8."""
    md5 = hashlib.md5()
    md5.update(text.encode("utf-8"))
    return md5.hexdigest()
def test_no_scm(tmp_dir, dvc):
    """diff() must raise NoSCMError when the repo has no SCM backend."""
    from dvc.scm.base import NoSCMError
    tmp_dir.dvc_gen("file", "text")
    with pytest.raises(NoSCMError):
        dvc.diff()
def test_added(tmp_dir, scm, dvc):
    """An uncommitted dvc-tracked file shows up in the 'added' section."""
    tmp_dir.dvc_gen("file", "text")
    assert dvc.diff() == {
        "added": [{"path": "file", "hash": digest("text")}],
        "deleted": [],
        "modified": [],
        "not in cache": [],
        "renamed": [],
    }
def test_no_cache_entry(tmp_dir, scm, dvc):
    """diff() still reports adds/modifications after the cache is removed."""
    tmp_dir.dvc_gen("file", "first", commit="add a file")
    tmp_dir.dvc_gen({"dir": {"1": "1", "2": "2"}})
    tmp_dir.dvc_gen("file", "second")
    # Blow away the local cache; hashes come from the .dvc files instead.
    remove(tmp_dir / ".dvc" / "cache")
    dir_checksum = "5fb6b29836c388e093ca0715c872fe2a.dir"
    assert dvc.diff() == {
        "added": [
            {"path": os.path.join("dir", ""), "hash": dir_checksum},
            {"path": os.path.join("dir", "1"), "hash": digest("1")},
            {"path": os.path.join("dir", "2"), "hash": digest("2")},
        ],
        "deleted": [],
        "modified": [
            {
                "path": "file",
                "hash": {"old": digest("first"), "new": digest("second")},
            }
        ],
        "not in cache": [],
        "renamed": [],
    }
@pytest.mark.parametrize("delete_data", [True, False])
def test_deleted(tmp_dir, scm, dvc, delete_data):
    """Removing the .dvc file marks the output deleted, with or without the
    workspace data itself being removed."""
    tmp_dir.dvc_gen("file", "text", commit="add file")
    (tmp_dir / "file.dvc").unlink()
    if delete_data:
        (tmp_dir / "file").unlink()
    assert dvc.diff() == {
        "added": [],
        "deleted": [{"path": "file", "hash": digest("text")}],
        "modified": [],
        "not in cache": [],
        "renamed": [],
    }
def test_modified(tmp_dir, scm, dvc):
    """Changing a tracked file's content reports old/new hashes under
    'modified'."""
    tmp_dir.dvc_gen("file", "first", commit="first version")
    tmp_dir.dvc_gen("file", "second")
    assert dvc.diff() == {
        "added": [],
        "deleted": [],
        "modified": [
            {
                "path": "file",
                "hash": {"old": digest("first"), "new": digest("second")},
            }
        ],
        "not in cache": [],
        "renamed": [],
    }
def test_refs(tmp_dir, scm, dvc):
    """diff() accepts Git revisions (HEAD~N) and rejects unknown ones."""
    tmp_dir.dvc_gen("file", "first", commit="first version")
    tmp_dir.dvc_gen("file", "second", commit="second version")
    tmp_dir.dvc_gen("file", "third", commit="third version")
    HEAD_2 = digest("first")
    HEAD_1 = digest("second")
    HEAD = digest("third")
    assert dvc.diff("HEAD~1") == {
        "added": [],
        "deleted": [],
        "modified": [{"path": "file", "hash": {"old": HEAD_1, "new": HEAD}}],
        "not in cache": [],
        "renamed": [],
    }
    assert dvc.diff("HEAD~2", "HEAD~1") == {
        "added": [],
        "deleted": [],
        "modified": [{"path": "file", "hash": {"old": HEAD_2, "new": HEAD_1}}],
        "not in cache": [],
        "renamed": [],
    }
    with pytest.raises(DvcException, match=r"unknown Git revision 'missing'"):
        dvc.diff("missing")
def test_directories(tmp_dir, scm, dvc):
    """Directory-level diffs report the '.dir' entry plus per-file changes
    across a sequence of commits (add, modify, delete)."""
    tmp_dir.dvc_gen({"dir": {"1": "1", "2": "2"}}, commit="add a directory")
    tmp_dir.dvc_gen({"dir": {"3": "3"}}, commit="add a file")
    tmp_dir.dvc_gen({"dir": {"2": "two"}}, commit="modify a file")
    (tmp_dir / "dir" / "2").unlink()
    assert dvc.status() != {}  # sanity check
    dvc.add("dir")
    scm.add(["dir.dvc"])
    scm.commit("delete a file")
    # The ":/<text>" format is a way to specify revisions by commit message:
    #       https://git-scm.com/docs/revisions
    #
    assert dvc.diff(":/init", ":/directory") == {
        "added": [
            {
                "path": os.path.join("dir", ""),
                "hash": "5fb6b29836c388e093ca0715c872fe2a.dir",
            },
            {"path": os.path.join("dir", "1"), "hash": digest("1")},
            {"path": os.path.join("dir", "2"), "hash": digest("2")},
        ],
        "deleted": [],
        "modified": [],
        "not in cache": [],
        "renamed": [],
    }
    assert dvc.diff(":/directory", ":/modify") == {
        "added": [{"path": os.path.join("dir", "3"), "hash": digest("3")}],
        "deleted": [],
        "modified": [
            {
                "path": os.path.join("dir", ""),
                "hash": {
                    "old": "5fb6b29836c388e093ca0715c872fe2a.dir",
                    "new": "9b5faf37366b3370fd98e3e60ca439c1.dir",
                },
            },
            {
                "path": os.path.join("dir", "2"),
                "hash": {"old": digest("2"), "new": digest("two")},
            },
        ],
        "not in cache": [],
        "renamed": [],
    }
    assert dvc.diff(":/modify", ":/delete") == {
        "added": [],
        "deleted": [{"path": os.path.join("dir", "2"), "hash": digest("two")}],
        "modified": [
            {
                "path": os.path.join("dir", ""),
                "hash": {
                    "old": "9b5faf37366b3370fd98e3e60ca439c1.dir",
                    "new": "83ae82fb367ac9926455870773ff09e6.dir",
                },
            }
        ],
        "not in cache": [],
        "renamed": [],
    }
def test_diff_no_cache(tmp_dir, scm, dvc):
    """With the cache removed, diff between revs still works; missing
    workspace data lands in 'not in cache'."""
    tmp_dir.dvc_gen({"dir": {"file": "file content"}}, commit="first")
    scm.tag("v1")
    tmp_dir.dvc_gen(
        {"dir": {"file": "modified file content"}}, commit="second"
    )
    scm.tag("v2")
    remove(dvc.odb.local.cache_dir)
    # invalidate_dir_info to force cache loading
    dvc.odb.local._dir_info = {}
    diff = dvc.diff("v1", "v2")
    assert diff["added"] == []
    assert diff["deleted"] == []
    assert first(diff["modified"])["path"] == os.path.join("dir", "")
    assert diff["not in cache"] == []
    (tmp_dir / "dir" / "file").unlink()
    remove(str(tmp_dir / "dir"))
    diff = dvc.diff()
    assert diff["added"] == []
    assert diff["deleted"] == []
    assert diff["renamed"] == []
    assert diff["modified"] == []
    assert diff["not in cache"] == [
        {
            "path": os.path.join("dir", ""),
            "hash": "f0f7a307d223921557c929f944bf5303.dir",
        }
    ]
def test_diff_dirty(tmp_dir, scm, dvc):
    """A dirty workspace reports additions, deletions, and directory
    modifications in one diff."""
    tmp_dir.dvc_gen(
        {"file": "file_content", "dir": {"dir_file1": "dir file content"}},
        commit="initial",
    )
    (tmp_dir / "file").unlink()
    tmp_dir.gen({"dir": {"dir_file2": "dir file 2 content"}})
    tmp_dir.dvc_gen("new_file", "new_file_content")
    result = dvc.diff()
    assert result == {
        "added": [
            {
                "hash": digest("dir file 2 content"),
                "path": os.path.join("dir", "dir_file2"),
            },
            {"hash": "86d049de17c76ac44cdcac146042ec9b", "path": "new_file"},
        ],
        "deleted": [
            {"hash": "7f0b6bb0b7e951b7fd2b2a4a326297e1", "path": "file"}
        ],
        "modified": [
            {
                "hash": {
                    "new": "38175ad60f0e58ac94e0e2b7688afd81.dir",
                    "old": "92daf39af116ca2fb245acaeb2ae65f7.dir",
                },
                "path": os.path.join("dir", ""),
            }
        ],
        "not in cache": [],
        "renamed": [],
    }
def test_no_changes(tmp_dir, scm, dvc):
    """A clean workspace yields an empty diff dict."""
    tmp_dir.dvc_gen("file", "first", commit="add a file")
    assert dvc.diff() == {}
def test_no_commits(tmp_dir):
    """A freshly initialized repo with no commits diffs to an empty dict."""
    from dvc.repo import Repo
    from dvc.scm.git import Git
    from tests.dir_helpers import git_init
    git_init(".")
    assert Git().no_commits
    assert Repo.init().diff() == {}
def setup_targets_test(tmp_dir):
    """Shared fixture builder for the targets=... tests: one committed and
    modified file, an uncommitted directory, and a nested file."""
    tmp_dir.dvc_gen("file", "first", commit="add a file")
    tmp_dir.dvc_gen({"dir": {"1": "1", "2": "2"}})
    tmp_dir.dvc_gen("file", "second")
    tmp_dir.dvc_gen(os.path.join("dir_with", "file.txt"), "first")
def test_targets_missing_path(tmp_dir, scm, dvc):
    """A nonexistent target must raise PathMissingError."""
    from dvc.exceptions import PathMissingError
    setup_targets_test(tmp_dir)
    with pytest.raises(PathMissingError):
        dvc.diff(targets=["missing"])
def test_targets_single_file(tmp_dir, scm, dvc):
    """Targeting one file limits the diff to that file only."""
    setup_targets_test(tmp_dir)
    assert dvc.diff(targets=["file"]) == {
        "added": [],
        "deleted": [],
        "modified": [
            {
                "path": "file",
                "hash": {"old": digest("first"), "new": digest("second")},
            }
        ],
        "not in cache": [],
        "renamed": [],
    }
def test_targets_single_dir(tmp_dir, scm, dvc):
    """Targeting a directory (with or without trailing separator) reports the
    directory entry and its files."""
    setup_targets_test(tmp_dir)
    dir_checksum = "5fb6b29836c388e093ca0715c872fe2a.dir"
    expected_result = {
        "added": [
            {"path": os.path.join("dir", ""), "hash": dir_checksum},
            {"path": os.path.join("dir", "1"), "hash": digest("1")},
            {"path": os.path.join("dir", "2"), "hash": digest("2")},
        ],
        "deleted": [],
        "modified": [],
        "not in cache": [],
        "renamed": [],
    }
    assert dvc.diff(targets=["dir"]) == expected_result
    assert dvc.diff(targets=["dir" + os.path.sep]) == expected_result
def test_targets_single_file_in_dir(tmp_dir, scm, dvc):
    """Targeting a file inside a tracked directory reports just that file."""
    setup_targets_test(tmp_dir)
    assert dvc.diff(targets=[os.path.join("dir", "1")]) == {
        "added": [{"path": os.path.join("dir", "1"), "hash": digest("1")}],
        "deleted": [],
        "modified": [],
        "not in cache": [],
        "renamed": [],
    }
def test_targets_two_files_in_dir(tmp_dir, scm, dvc):
    """Multiple file targets inside a directory are all reported."""
    setup_targets_test(tmp_dir)
    assert dvc.diff(
        targets=[os.path.join("dir", "1"), os.path.join("dir", "2")]
    ) == {
        "added": [
            {"path": os.path.join("dir", "1"), "hash": digest("1")},
            {"path": os.path.join("dir", "2"), "hash": digest("2")},
        ],
        "deleted": [],
        "modified": [],
        "not in cache": [],
        "renamed": [],
    }
def test_targets_file_and_dir(tmp_dir, scm, dvc):
    """Mixed file and directory targets combine both result sets."""
    setup_targets_test(tmp_dir)
    dir_checksum = "5fb6b29836c388e093ca0715c872fe2a.dir"
    assert dvc.diff(targets=["file", "dir"]) == {
        "added": [
            {"path": os.path.join("dir", ""), "hash": dir_checksum},
            {"path": os.path.join("dir", "1"), "hash": digest("1")},
            {"path": os.path.join("dir", "2"), "hash": digest("2")},
        ],
        "deleted": [],
        "modified": [
            {
                "path": "file",
                "hash": {"old": digest("first"), "new": digest("second")},
            }
        ],
        "not in cache": [],
        "renamed": [],
    }
def test_targets_single_dir_with_file(tmp_dir, scm, dvc):
    """A directory that directly contains a tracked file diffs the same with
    or without a trailing separator."""
    setup_targets_test(tmp_dir)
    expected_result = {
        "added": [
            {
                "path": os.path.join("dir_with", "file.txt"),
                "hash": digest("first"),
            }
        ],
        "deleted": [],
        "modified": [],
        "not in cache": [],
        "renamed": [],
    }
    assert dvc.diff(targets=["dir_with"]) == expected_result
    assert dvc.diff(targets=["dir_with" + os.path.sep]) == expected_result
def test_targets_single_file_in_dir_with_file(tmp_dir, scm, dvc):
    """Targeting the nested file path itself reports only that file."""
    setup_targets_test(tmp_dir)
    assert dvc.diff(targets=[os.path.join("dir_with", "file.txt")]) == {
        "added": [
            {
                "path": os.path.join("dir_with", "file.txt"),
                "hash": digest("first"),
            }
        ],
        "deleted": [],
        "modified": [],
        "not in cache": [],
        "renamed": [],
    }
@pytest.mark.parametrize("commit_last", [True, False])
def test_diff_add_similar_files(tmp_dir, scm, dvc, commit_last):
    """A second directory with identical contents is reported as 'added',
    not as a rename, whether or not its addition was committed."""
    if commit_last:
        last_commit_msg = "commit #2"
        a_rev = "HEAD~1"
    else:
        last_commit_msg = None
        a_rev = "HEAD"
    tmp_dir.dvc_gen(
        {"dir": {"file": "text1", "subdir": {"file2": "text2"}}},
        commit="commit #1",
    )
    tmp_dir.dvc_gen(
        {"dir2": {"file": "text1", "subdir": {"file2": "text2"}}},
        commit=last_commit_msg,
    )
    assert dvc.diff(a_rev) == {
        "added": [
            {
                "path": os.path.join("dir2", ""),
                "hash": "cb58ee07cb01044db229e4d6121a0dfc.dir",
            },
            {
                "path": os.path.join("dir2", "file"),
                "hash": "cef7ccd89dacf1ced6f5ec91d759953f",
            },
            {
                "path": os.path.join("dir2", "subdir", "file2"),
                "hash": "fe6123a759017e4a2af4a2d19961ed71",
            },
        ],
        "deleted": [],
        "modified": [],
        "renamed": [],
        "not in cache": [],
    }
@pytest.mark.parametrize("commit_last", [True, False])
def test_diff_rename_folder(tmp_dir, scm, dvc, commit_last):
    """Moving a whole tracked directory is detected as renames for the
    directory entry and every file beneath it."""
    if commit_last:
        last_commit_msg = "commit #2"
        a_rev = "HEAD~1"
    else:
        last_commit_msg = None
        a_rev = "HEAD"
    tmp_dir.dvc_gen(
        {"dir": {"file": "text1", "subdir": {"file2": "text2"}}},
        commit="commit #1",
    )
    (tmp_dir / "dir").replace(tmp_dir / "dir2")
    tmp_dir.dvc_add("dir2", commit=last_commit_msg)
    assert dvc.diff(a_rev) == {
        "added": [],
        "deleted": [],
        "modified": [],
        "renamed": [
            {
                "path": {
                    "old": os.path.join("dir", ""),
                    "new": os.path.join("dir2", ""),
                },
                "hash": "cb58ee07cb01044db229e4d6121a0dfc.dir",
            },
            {
                "path": {
                    "old": os.path.join("dir", "file"),
                    "new": os.path.join("dir2", "file"),
                },
                "hash": "cef7ccd89dacf1ced6f5ec91d759953f",
            },
            {
                "path": {
                    "old": os.path.join("dir", "subdir", "file2"),
                    "new": os.path.join("dir2", "subdir", "file2"),
                },
                "hash": "fe6123a759017e4a2af4a2d19961ed71",
            },
        ],
        "not in cache": [],
    }
@pytest.mark.parametrize("commit_last", [True, False])
def test_diff_rename_file(tmp_dir, scm, dvc, commit_last):
    """Moving one file within a tracked directory yields a rename for the
    file plus a modification of the containing directory hash."""
    if commit_last:
        last_commit_msg = "commit #2"
        a_rev = "HEAD~1"
    else:
        last_commit_msg = None
        a_rev = "HEAD"
    paths = tmp_dir.gen(
        {"dir": {"file": "text1", "subdir": {"file2": "text2"}}}
    )
    tmp_dir.dvc_add(paths, commit="commit #1")
    (tmp_dir / "dir" / "file").replace(tmp_dir / "dir" / "subdir" / "file3")
    tmp_dir.dvc_add(paths, commit=last_commit_msg)
    assert dvc.diff(a_rev) == {
        "added": [],
        "deleted": [],
        "modified": [
            {
                "path": os.path.join("dir", ""),
                "hash": {
                    "old": "cb58ee07cb01044db229e4d6121a0dfc.dir",
                    "new": "a4ac9c339aacc60b6a3152e362c319c8.dir",
                },
            }
        ],
        "renamed": [
            {
                "path": {
                    "old": os.path.join("dir", "file"),
                    "new": os.path.join("dir", "subdir", "file3"),
                },
                "hash": "cef7ccd89dacf1ced6f5ec91d759953f",
            }
        ],
        "not in cache": [],
    }
def test_rename_multiple_files_same_hashes(tmp_dir, scm, dvc):
    """Test diff by renaming >=2 instances of file with same hashes.
    DVC should be able to detect that they are renames, and should not include
    them in either of the `added` or the `deleted` section.
    """
    tmp_dir.dvc_gen(
        {"dir": {"foo": "foo", "subdir": {"foo": "foo"}}}, commit="commit #1"
    )
    remove(tmp_dir / "dir")
    # changing foo and subdir/foo to bar and subdir/bar respectively
    tmp_dir.dvc_gen(
        {"dir": {"bar": "foo", "subdir": {"bar": "foo"}}}, commit="commit #2"
    )

    # Both renamed files have identical content, hence one shared hash.
    foo_hash = "acbd18db4cc2f85cedef654fccc4a4d8"
    expected = {
        "added": [],
        "deleted": [],
        "modified": [
            {
                "hash": {
                    "new": "31b36b3ea5f4485e27f10578c47183b0.dir",
                    "old": "c7684c8b3b0d28cf80d5305e2d856bfc.dir",
                },
                "path": os.path.join("dir", ""),
            }
        ],
        "not in cache": [],
        "renamed": [
            {
                "hash": foo_hash,
                "path": {
                    "new": os.path.join("dir", "bar"),
                    "old": os.path.join("dir", "foo"),
                },
            },
            {
                "hash": foo_hash,
                "path": {
                    "new": os.path.join("dir", "subdir", "bar"),
                    "old": os.path.join("dir", "subdir", "foo"),
                },
            },
        ],
    }
    assert dvc.diff("HEAD~") == expected
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import inspect
import mxnet as mx
from mxnet import nd
from mxnet import np
from .ndarray_utils import get_mx_ndarray, nd_forward_and_profile, nd_forward_backward_and_profile
from .common_utils import merge_map_list
from .op_registry_utils import prepare_op_inputs
from benchmark.opperf.rules.default_params import PARAMS_OF_TYPE_NDARRAY, PARAMS_OF_TYPE_NP_ARRAY
from .profiler_utils import cpp_profile, python_profile
no_backward = {'gather_nd', 'softmax_cross_entropy', 'linalg_gelqf', 'linalg_slogdet', 'moments', 'SequenceLast', 'Embedding'}
def _prepare_op_inputs(inputs, run_backward, dtype, ctx, module):
    """Materialize operator inputs into per-call kwargs dicts.

    Parameters named in the module-appropriate tensor-parameter list are
    converted to tensors on ``ctx``; every other value is passed through
    unchanged.
    """
    mx.random.seed(41)

    # numpy / numpy_extension operators take mx.np arrays; the classic
    # operators take nd.NDArray.
    if module in ('mxnet.numpy_extension', 'mxnet.numpy'):
        tensor_params = PARAMS_OF_TYPE_NP_ARRAY
        make_tensor = get_mx_np_ndarray
    else:
        tensor_params = PARAMS_OF_TYPE_NDARRAY
        make_tensor = get_mx_ndarray

    kwargs_list = []
    for inp in inputs:
        kwargs = {
            key: (make_tensor(ctx=ctx, in_tensor=value,
                              dtype=dtype,
                              initializer=nd.normal,
                              attach_grad=run_backward)
                  if key in tensor_params else value)
            for key, value in inp.items()
        }
        kwargs_list.append(kwargs)
    return kwargs_list
def get_mx_np_ndarray(ctx, in_tensor, dtype, initializer, attach_grad=True):
    """Helper function to prepare a MXNet Numpy NDArray tensor in given Context (ctx) of type (dtype).
    You can get a new Tensor by providing only "Shape" or "Numpy NDArray" or another MXNet NDArray as
    "in_tensor".
    NOTE: This is a sync call and waits for the Tensor to be created.
    Parameters
    ----------
    ctx: mx.ctx, default mx.cpu()
        Context of the new MXNet NDArray Tensor.
    in_tensor: int or float or Numpy NDArray or MXNet NDArray or Tuple of shape
        Scalars are returned unchanged; a tuple is treated as a shape to
        initialize; arrays are converted/moved to ``ctx``.
    dtype: str
        Precision or Dtype of the expected Tensor. Ex: "float32", "Int64"
    initializer:
        Function reference to the initializer to use. Ex: mx.nd.random.normal, mx.nd.zeros
    attach_grad: Boolean, default True
        To attach a gradient for the Tensor. Default is True.
    Returns
    -------
    MXNet Numpy NDArray Tensor (or the scalar itself for int/float input).
    """
    # Scalars are legal operator arguments; pass them through untouched.
    if isinstance(in_tensor, (int, float)):
        return in_tensor

    if isinstance(in_tensor, tuple):
        # Treat the tuple as a shape: build a classic NDArray first (the
        # initializers are nd-based), then convert to np and cast to dtype.
        nd_ndarray = get_mx_ndarray(ctx=ctx, in_tensor=in_tensor,
                                    dtype="float32",
                                    initializer=initializer,
                                    attach_grad=attach_grad)
        tensor = nd_ndarray.as_np_ndarray().astype(dtype=dtype)
    elif isinstance(in_tensor, list):
        # NOTE(review): the list path does not cast to ``dtype`` — confirm
        # whether that is intentional before "fixing" it.
        tensor = np.array(in_tensor, ctx=ctx)
    elif isinstance(in_tensor, nd.NDArray):
        tensor = in_tensor.as_np_ndarray()
    elif isinstance(in_tensor, np.ndarray):
        tensor = in_tensor.as_in_context(ctx)
    else:
        raise ValueError("Invalid input type for creating input tensor. Input can be tuple() of shape or Numpy Array or"
                         " MXNet NDArray. Given - ", in_tensor)

    if attach_grad:
        tensor.attach_grad()

    # Sync point: ensure the tensor is fully materialized before returning.
    tensor.wait_to_read()
    return tensor
def parse_input_ndarray(input_dict):
    """Return a copy of ``input_dict`` where every NDArray value is replaced
    by its one-line summary (e.g. ``'<NDArray 5x5 @cpu(0)>'``).

    ``str(NDArray)`` ends with the ``<NDArray shape @ctx>`` line, so keeping
    only the last line drops the printed matrix data and leaves shape/context
    info, which renders much better in the markdown reports.  Non-NDArray
    values (ints, floats, ...) are kept as-is.

    Parameters
    ----------
    input_dict : dict
        Dictionary of operator inputs.
    """
    return {
        key: (str(value).split('\n')[-1]
              if isinstance(value, nd.NDArray) else value)
        for key, value in input_dict.items()
    }
def _run_operator_performance_test(op, inputs, run_backward, warmup, runs, kwargs_list, profiler):
    """Profile ``op`` over every prepared kwargs set and collect the results.

    Returns a dict mapping the operator name to a list of per-input result
    maps (each merged with a readable copy of the inputs used).
    """
    # Pick the profiling decorator, then wrap the right forward(/backward)
    # helper with it.
    profile_wrappers = {'native': cpp_profile, 'python': python_profile}
    if profiler not in profile_wrappers:
        raise ValueError("Incorrect input for profiler. Valid input - 'python' or 'native'")
    helper = nd_forward_backward_and_profile if run_backward else nd_forward_and_profile
    benchmark_helper_func = profile_wrappers[profiler](helper)

    # Warm up, ignore the profiler output
    _, _ = benchmark_helper_func(op, warmup, **kwargs_list[0])

    # Run Benchmarks
    op_benchmark_result = {op.__name__: []}
    logging.info("Begin Benchmark - {name}".format(name=op.__name__))

    for idx, kwargs in enumerate(kwargs_list):
        _, profiler_output = benchmark_helper_func(op, runs, **kwargs)

        # Record the inputs alongside the timings; NDArrays are replaced by
        # their shape summary for markdown readability.
        readable_inputs = parse_input_ndarray(inputs[idx])
        profiler_output = merge_map_list([{"inputs": readable_inputs}, profiler_output])
        op_benchmark_result[op.__name__].append(profiler_output)

    logging.info("Complete Benchmark - {name}".format(name=op.__name__))
    return op_benchmark_result
def run_performance_test(ops, inputs, run_backward=True,
                         dtype='float32', ctx=mx.cpu(), profiler='native',
                         warmup=10, runs=50):
    """Run operator benchmarks for one operator or a list of operators.

    Returns a list of dictionaries, one per operator: key is the operator
    name, value is a map of results (forward time, backward time, time spent
    in memory operations).

    Parameters
    ----------
    ops: [Str]
        One or list of operators to benchmark. Should be an NDArray, Numpy or Numpy_extension operator.
    inputs: map
        Inputs for operator. Key should be name of parameter for operator.
        Example: inputs = {"lhs": (1024, 1024), "rhs": (1024, 1024)} for mx.nd.add or
                 inputs = {"x1": (1024, 1024), "x2": (1024, 1024)} for mx.np.add
    run_backward: Boolean, Default is True
        Should we have backward operator benchmarks.
    dtype: Str, default 'float32'
        Precision to use for input tensors. Example: 'float32', 'int64'
    ctx: mx.ctx, default mx.cpu()
        Context to use for benchmarks.
    profiler: Str, default 'native'
        Type of profiler to run benchmarks. Option - ['python', 'native']
    warmup: int, default 10
        Number of warmup runs
    runs: int, default 50
        Number of runs for capturing benchmark results

    Returns
    -------
    List of dictionary of benchmark results. key -> name of the operator, Value is benchmark results.
    Note: when run_performance_test is called on the nd.Embedding operator with run_backward=True, an error will
    be thrown. Track issue here: https://github.com/apache/incubator-mxnet/issues/11314
    """
    if not isinstance(ops, list):
        ops = [ops]

    op_benchmark_result = []
    for op in ops:
        # The operator must exist on at least one of the supported modules.
        if not any(hasattr(mod, op.__name__) for mod in (mx.nd, mx.np, mx.npx)):
            raise ValueError("Unknown {0} operator provided to benchmark. - {1}".format(op.__module__, op.__name__))
        kwargs_list = _prepare_op_inputs(inputs, run_backward, dtype, ctx, op.__module__)
        op_benchmark_result.append(
            _run_operator_performance_test(op, inputs, run_backward, warmup,
                                           runs, kwargs_list, profiler))
    return op_benchmark_result
def run_benchmark_operator(name, size=(128, 128), additional_inputs=None,
                           dtype='float32', run_backward=False, ctx=mx.cpu(),
                           warmup=10, runs=50, profiler="native"):
    """Benchmark the operator ``name`` on each of mx.nd, mx.np and mx.npx
    (wherever it exists) and print one result block per module.

    Parameters
    ----------
    name: str
        Operator name to look up on the three modules.
    size: tuple, default (128, 128)
        Shape used for every tensor-typed argument of the operator.
    additional_inputs: dict, optional
        Explicit values for specific argument names; these take precedence
        over ``size``.
    dtype: str, default 'float32'
        Precision for input tensors.
    run_backward: bool, default False
        Also benchmark the backward pass.
    ctx: mx.ctx, default mx.cpu()
        Context to run on.
    warmup: int, default 10
        Number of warmup runs.
    runs: int, default 50
        Number of measured runs.
    profiler: str, default "native"
        'native' or 'python'.
    """
    # Fix for the original mutable default argument ({}), which was shared
    # across calls.
    if additional_inputs is None:
        additional_inputs = {}

    arg_list = {mx.nd: PARAMS_OF_TYPE_NDARRAY, mx.np: PARAMS_OF_TYPE_NP_ARRAY, mx.npx: PARAMS_OF_TYPE_NP_ARRAY}
    modules = [mx.nd, mx.np, mx.npx]
    responses = []
    for module in modules:
        if hasattr(module, name):
            function = getattr(module, name)
            # inspect.getargspec was removed in Python 3.11;
            # getfullargspec is the drop-in replacement for ``.args``.
            args = inspect.getfullargspec(function).args
            inputs = {}
            for arg in args:
                if arg in additional_inputs:
                    inputs[arg] = additional_inputs[arg]
                elif arg in arg_list[module]:
                    inputs[arg] = size
            res = run_performance_test(function, run_backward=run_backward, dtype=dtype, ctx=ctx,
                                       inputs=[inputs],
                                       warmup=warmup, runs=runs, profiler=profiler)
            responses.append(res)
        else:
            responses.append(str(module.__name__) + " does not have operator " + name)
    for module, response in zip(modules, responses):
        print(module.__name__)
        print(response)
def run_op_benchmarks(ops, dtype, ctx, profiler, int64_tensor, warmup, runs):
    """Benchmark every operator in ``ops`` and return the merged result map."""
    # Running im2col either forwards or backwards on GPU results in errors
    # track issue here: https://github.com/apache/incubator-mxnet/issues/17493
    gpu_disabled_ops = ['im2col']

    # For each operator, run benchmarks
    mx_op_benchmark_results = []
    for op, op_params in ops.items():
        if ctx != mx.cpu() and op in gpu_disabled_ops:
            continue

        # Prepare inputs for the operator
        inputs = prepare_op_inputs(op, op_params, int64_tensor)

        # setting backward false for ops with known issue
        if op in no_backward:
            op_params["has_backward"] = False

        # Run benchmarks
        mx_op_benchmark_results.extend(
            run_performance_test(op_params["nd_op_handle"],
                                 run_backward=op_params["has_backward"],
                                 dtype=dtype, ctx=ctx,
                                 profiler=profiler,
                                 inputs=inputs,
                                 warmup=warmup, runs=runs))

    # Prepare combined results for all operators
    return merge_map_list(mx_op_benchmark_results)
| |
"""
A custom manager for working with trees of objects.
"""
from __future__ import unicode_literals
import contextlib
from itertools import groupby
import django
from django.db import models, connections, router
from django.db.models import F, ManyToManyField, Max, Q
from django.utils.translation import ugettext as _
from mptt.exceptions import CantDisableUpdates, InvalidMove
from mptt.querysets import TreeQuerySet
from mptt.utils import _get_tree_model
from mptt.signals import node_moved
__all__ = ('TreeManager',)
COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s = %(mptt_table)s.%(mptt_rel_to)s
)"""
CUMULATIVE_COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s IN
(
SELECT m2.%(mptt_rel_to)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
COUNT_SUBQUERY_M2M = """(
SELECT COUNT(*)
FROM %(rel_table)s j
INNER JOIN %(rel_m2m_table)s k ON j.%(rel_pk)s = k.%(rel_m2m_column)s
WHERE k.%(mptt_fk)s = %(mptt_table)s.%(mptt_pk)s
)"""
CUMULATIVE_COUNT_SUBQUERY_M2M = """(
SELECT COUNT(*)
FROM %(rel_table)s j
INNER JOIN %(rel_m2m_table)s k ON j.%(rel_pk)s = k.%(rel_m2m_column)s
WHERE k.%(mptt_fk)s IN
(
SELECT m2.%(mptt_pk)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
class TreeManager(models.Manager.from_queryset(TreeQuerySet)):
    """
    A manager for working with trees of objects.
    For multi-table-inheritance children, most operations delegate to the
    tree root model's manager (``self._base_manager``).
    """
    def contribute_to_class(self, model, name):
        """Django hook run when this manager is attached to ``model``; caches
        the tree model and, for derived models, the base tree manager."""
        super(TreeManager, self).contribute_to_class(model, name)
        # Abstract/swapped models carry no usable mptt state.
        if not model._meta.abstract and not model._meta.swapped:
            self.tree_model = _get_tree_model(model)
            self._base_manager = None
            if self.tree_model and self.tree_model is not model:
                # _base_manager is the treemanager on tree_model
                self._base_manager = self.tree_model._tree_manager
def get_queryset(self, *args, **kwargs):
"""
Ensures that this manager always returns nodes in tree order.
"""
if django.VERSION < (1, 7):
qs = TreeQuerySet(self.model, using=self._db)
else:
qs = super(TreeManager, self).get_queryset(*args, **kwargs)
return qs.order_by(self.tree_id_attr, self.left_attr)
    def _get_queryset_relatives(self, queryset, direction, include_self):
        """
        Returns a queryset containing either the descendants
        ``direction == desc`` or the ancestors ``direction == asc`` of a given
        queryset.
        This function is not meant to be called directly, although there is no
        harm in doing so.
        Instead, it should be used via ``get_queryset_descendants()`` and/or
        ``get_queryset_ancestors()``.
        This function works by grouping contiguous siblings and using them to create
        a range that selects all nodes between the range, instead of querying for each
        node individually. Three variables are required when querying for ancestors or
        descendants: tree_id_attr, left_attr, right_attr. If we weren't using ranges
        and our queryset contained 100 results, the resulting SQL query would contain
        300 variables. However, when using ranges, if the same queryset contained 10
        sets of contiguous siblings, then the resulting SQL query should only contain
        30 variables.
        The attributes used to create the range are completely
        dependent upon whether you are ascending or descending the tree.
        * Ascending (ancestor nodes): select all nodes whose right_attr is greater
          than (or equal to, if include_self = True) the smallest right_attr within
          the set of contiguous siblings, and whose left_attr is less than (or equal
          to) the largest left_attr within the set of contiguous siblings.
        * Descending (descendant nodes): select all nodes whose left_attr is greater
          than (or equal to, if include_self = True) the smallest left_attr within
          the set of contiguous siblings, and whose right_attr is less than (or equal
          to) the largest right_attr within the set of contiguous siblings.
        The result is the more contiguous siblings in the original queryset, the fewer
        SQL variables will be required to execute the query.
        """
        assert self.model is queryset.model
        opts = queryset.model._mptt_meta
        filters = Q()
        # With include_self, the comparisons become inclusive (lte/gte).
        e = 'e' if include_self else ''
        max_op = 'lt' + e
        min_op = 'gt' + e
        # Note the deliberate inversion for 'asc': ancestors are matched by
        # comparing against the siblings' right/left bounds (see docstring).
        # NOTE(review): any other ``direction`` value leaves max_attr/min_attr
        # unbound and raises NameError below; callers only pass 'asc'/'desc'.
        if direction == 'asc':
            max_attr = opts.left_attr
            min_attr = opts.right_attr
        elif direction == 'desc':
            max_attr = opts.right_attr
            min_attr = opts.left_attr
        tree_key = opts.tree_id_attr
        min_key = '%s__%s' % (min_attr, min_op)
        max_key = '%s__%s' % (max_attr, max_op)
        # Order by (tree, parent, left) so contiguous siblings come out
        # adjacent, which the groupby/next_lft logic below relies on.
        q = queryset.order_by(opts.tree_id_attr, opts.parent_attr, opts.left_attr).only(
            opts.tree_id_attr,
            opts.left_attr,
            opts.right_attr,
            min_attr,
            max_attr,
            opts.parent_attr,
            # These fields are used by MPTTModel.update_mptt_cached_fields()
            *opts.order_insertion_by
        )
        if not q:
            return self.none()
        # Group nodes sharing (tree, parent): only such nodes can be
        # contiguous siblings.
        for group in groupby(
                q,
                key=lambda n: (
                    getattr(n, opts.tree_id_attr),
                    getattr(n, opts.parent_attr + '_id'),
                )):
            next_lft = None
            for node in list(group[1]):
                tree, lft, rght, min_val, max_val = (getattr(node, opts.tree_id_attr),
                                                     getattr(node, opts.left_attr),
                                                     getattr(node, opts.right_attr),
                                                     getattr(node, min_attr),
                                                     getattr(node, max_attr))
                if next_lft is None:
                    # First node of the group starts a fresh range.
                    next_lft = rght + 1
                    min_max = {'min': min_val, 'max': max_val}
                elif lft == next_lft:
                    # Node is contiguous with the previous sibling: widen the
                    # current range instead of emitting a new filter.
                    if min_val < min_max['min']:
                        min_max['min'] = min_val
                    if max_val > min_max['max']:
                        min_max['max'] = max_val
                    next_lft = rght + 1
                elif lft != next_lft:
                    # Gap in siblings: flush the accumulated range and start
                    # a new one at this node.
                    filters |= Q(**{
                        tree_key: tree,
                        min_key: min_max['min'],
                        max_key: min_max['max'],
                    })
                    min_max = {'min': min_val, 'max': max_val}
                    next_lft = rght + 1
            # Flush the last open range of the group.
            filters |= Q(**{
                tree_key: tree,
                min_key: min_max['min'],
                max_key: min_max['max'],
            })
        return self.filter(filters)
def get_queryset_descendants(self, queryset, include_self=False):
"""
Returns a queryset containing the descendants of all nodes in the
given queryset.
If ``include_self=True``, nodes in ``queryset`` will also
be included in the result.
"""
return self._get_queryset_relatives(queryset, 'desc', include_self)
def get_queryset_ancestors(self, queryset, include_self=False):
"""
Returns a queryset containing the ancestors
of all nodes in the given queryset.
If ``include_self=True``, nodes in ``queryset`` will also
be included in the result.
"""
return self._get_queryset_relatives(queryset, 'asc', include_self)
    @contextlib.contextmanager
    def disable_mptt_updates(self):
        """
        Context manager. Disables mptt updates.
        NOTE that this context manager causes inconsistencies! MPTT model
        methods are not guaranteed to return the correct results.
        When to use this method:
            If used correctly, this method can be used to speed up bulk
            updates.
            This doesn't do anything clever. It *will* mess up your tree. You
            should follow this method with a call to ``TreeManager.rebuild()``
            to ensure your tree stays sane, and you should wrap both calls in a
            transaction.
            This is best for updates that span a large part of the table. If
            you are doing localised changes (one tree, or a few trees) consider
            using ``delay_mptt_updates``.
            If you are making only minor changes to your tree, just let the
            updates happen.
        Transactions:
            This doesn't enforce any transactional behavior. You should wrap
            this in a transaction to ensure database consistency.
        If updates are already disabled on the model, this is a noop.
        Usage::
            with transaction.atomic():
                with MyNode.objects.disable_mptt_updates():
                    ## bulk updates.
                MyNode.objects.rebuild()
        """
        # Error cases:
        if self.model._meta.abstract:
            # an abstract model. Design decision needed - do we disable
            # updates for all concrete models that derive from this model? I
            # vote no - that's a bit implicit and it's a weird use-case
            # anyway. Open to further discussion :)
            raise CantDisableUpdates(
                "You can't disable/delay mptt updates on %s,"
                " it's an abstract model" % self.model.__name__
            )
        elif self.model._meta.proxy:
            # a proxy model. disabling updates would implicitly affect other
            # models using the db table. Caller should call this on the
            # manager for the concrete model instead, to make the behavior
            # explicit.
            raise CantDisableUpdates(
                "You can't disable/delay mptt updates on %s, it's a proxy"
                " model. Call the concrete model instead."
                % self.model.__name__
            )
        elif self.tree_model is not self.model:
            # a multiple-inheritance child of an MPTTModel. Disabling
            # updates may affect instances of other models in the tree.
            raise CantDisableUpdates(
                "You can't disable/delay mptt updates on %s, it doesn't"
                " contain the mptt fields."
                % self.model.__name__
            )
        if not self.model._mptt_updates_enabled:
            # already disabled, noop.
            yield
        else:
            self.model._set_mptt_updates_enabled(False)
            try:
                yield
            finally:
                # Re-enable unconditionally, even if the wrapped block raised.
                self.model._set_mptt_updates_enabled(True)
    @contextlib.contextmanager
    def delay_mptt_updates(self):
        """
        Context manager. Delays mptt updates until the end of a block of bulk
        processing.
        NOTE that this context manager causes inconsistencies! MPTT model
        methods are not guaranteed to return the correct results until the end
        of the context block.
        When to use this method:
            If used correctly, this method can be used to speed up bulk
            updates. This is best for updates in a localised area of the db
            table, especially if all the updates happen in a single tree and
            the rest of the forest is left untouched. No subsequent rebuild is
            necessary.
            ``delay_mptt_updates`` does a partial rebuild of the modified trees
            (not the whole table). If used indiscriminately, this can actually
            be much slower than just letting the updates occur when they're
            required.
            The worst case occurs when every tree in the table is modified just
            once. That results in a full rebuild of the table, which can be
            *very* slow.
            If your updates will modify most of the trees in the table (not a
            small number of trees), you should consider using
            ``TreeManager.disable_mptt_updates``, as it does much fewer
            queries.
        Transactions:
            This doesn't enforce any transactional behavior. You should wrap
            this in a transaction to ensure database consistency.
        Exceptions:
            If an exception occurs before the processing of the block, delayed
            updates will not be applied.
        Usage::
            with transaction.atomic():
                with MyNode.objects.delay_mptt_updates():
                    ## bulk updates.
        """
        # Updates are suspended for the whole block; tracking records which
        # trees were touched so only those get partially rebuilt afterwards.
        with self.disable_mptt_updates():
            if self.model._mptt_is_tracking:
                # already tracking, noop.
                yield
            else:
                self.model._mptt_start_tracking()
                try:
                    yield
                except Exception:
                    # stop tracking, but discard results
                    self.model._mptt_stop_tracking()
                    raise
                # Success path: rebuild every tree touched inside the block.
                results = self.model._mptt_stop_tracking()
                partial_rebuild = self.partial_rebuild
                for tree_id in results:
                    partial_rebuild(tree_id)
    # Convenience accessors mirroring the attribute names configured on the
    # model's MPTTMeta (``_mptt_meta``).
    @property
    def parent_attr(self):
        """Name of the model field holding the parent link."""
        return self.model._mptt_meta.parent_attr
    @property
    def left_attr(self):
        """Name of the model field holding the MPTT "left" value."""
        return self.model._mptt_meta.left_attr
    @property
    def right_attr(self):
        """Name of the model field holding the MPTT "right" value."""
        return self.model._mptt_meta.right_attr
    @property
    def tree_id_attr(self):
        """Name of the model field holding the tree id."""
        return self.model._mptt_meta.tree_id_attr
    @property
    def level_attr(self):
        """Name of the model field holding the node depth (level)."""
        return self.model._mptt_meta.level_attr
def _translate_lookups(self, **lookups):
new_lookups = {}
join_parts = '__'.join
for k, v in lookups.items():
parts = k.split('__')
new_parts = []
new_parts__append = new_parts.append
for part in parts:
new_parts__append(getattr(self, part + '_attr', part))
new_lookups[join_parts(new_parts)] = v
return new_lookups
def _mptt_filter(self, qs=None, **filters):
"""
Like ``self.filter()``, but translates name-agnostic filters for MPTT
fields.
"""
if self._base_manager:
return self._base_manager._mptt_filter(qs=qs, **filters)
if qs is None:
qs = self
return qs.filter(**self._translate_lookups(**filters))
def _mptt_update(self, qs=None, **items):
"""
Like ``self.update()``, but translates name-agnostic MPTT fields.
"""
if self._base_manager:
return self._base_manager._mptt_update(qs=qs, **items)
if qs is None:
qs = self
return qs.update(**self._translate_lookups(**items))
def _get_connection(self, **hints):
return connections[router.db_for_write(self.model, **hints)]
def add_related_count(self, queryset, rel_model, rel_field, count_attr,
cumulative=False):
"""
Adds a related item count to a given ``QuerySet`` using its
``extra`` method, for a ``Model`` class which has a relation to
this ``Manager``'s ``Model`` class.
Arguments:
``rel_model``
A ``Model`` class which has a relation to this `Manager``'s
``Model`` class.
``rel_field``
The name of the field in ``rel_model`` which holds the
relation.
``count_attr``
The name of an attribute which should be added to each item in
this ``QuerySet``, containing a count of how many instances
of ``rel_model`` are related to it through ``rel_field``.
``cumulative``
If ``True``, the count will be for each item and all of its
descendants, otherwise it will be for each item itself.
"""
connection = self._get_connection()
qn = connection.ops.quote_name
meta = self.model._meta
mptt_field = rel_model._meta.get_field(rel_field)
if isinstance(mptt_field, ManyToManyField):
if cumulative:
subquery = CUMULATIVE_COUNT_SUBQUERY_M2M % {
'rel_table': qn(rel_model._meta.db_table),
'rel_pk': qn(rel_model._meta.pk.column),
'rel_m2m_table': qn(mptt_field.m2m_db_table()),
'rel_m2m_column': qn(mptt_field.m2m_column_name()),
'mptt_fk': qn(mptt_field.m2m_reverse_name()),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
'tree_id': qn(meta.get_field(self.tree_id_attr).column),
'left': qn(meta.get_field(self.left_attr).column),
'right': qn(meta.get_field(self.right_attr).column),
}
else:
subquery = COUNT_SUBQUERY_M2M % {
'rel_table': qn(rel_model._meta.db_table),
'rel_pk': qn(rel_model._meta.pk.column),
'rel_m2m_table': qn(mptt_field.m2m_db_table()),
'rel_m2m_column': qn(mptt_field.m2m_column_name()),
'mptt_fk': qn(mptt_field.m2m_reverse_name()),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
}
else:
if cumulative:
subquery = CUMULATIVE_COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_rel_to': qn(mptt_field.rel.field_name),
'tree_id': qn(meta.get_field(self.tree_id_attr).column),
'left': qn(meta.get_field(self.left_attr).column),
'right': qn(meta.get_field(self.right_attr).column),
}
else:
subquery = COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_rel_to': qn(mptt_field.rel.field_name),
}
return queryset.extra(select={count_attr: subquery})
    def insert_node(self, node, target, position='last-child', save=False,
                    allow_existing_pk=False, refresh_target=True):
        """
        Sets up the tree state for ``node`` (which has not yet been
        inserted into in the database) so it will be positioned relative
        to a given ``target`` node as specified by ``position`` (when
        appropriate) it is inserted, with any neccessary space already
        having been made for it.
        A ``target`` of ``None`` indicates that ``node`` should be
        the last root node.
        If ``save`` is ``True``, ``node``'s ``save()`` method will be
        called before it is returned.
        NOTE: This is a low-level method; it does NOT respect
        ``MPTTMeta.order_insertion_by``. In most cases you should just
        set the node's parent and let mptt call this during save.
        """
        if self._base_manager:
            # NOTE(review): refresh_target is not forwarded here, unlike in
            # _move_node — confirm whether the base manager should receive it.
            return self._base_manager.insert_node(
                node, target, position=position, save=save, allow_existing_pk=allow_existing_pk)
        if node.pk and not allow_existing_pk and self.filter(pk=node.pk).exists():
            raise ValueError(_('Cannot insert a node which has already been saved.'))
        if target is None:
            # No target: the node becomes the last root node in a new tree.
            tree_id = self._get_next_tree_id()
            setattr(node, self.left_attr, 1)
            setattr(node, self.right_attr, 2)
            setattr(node, self.level_attr, 0)
            setattr(node, self.tree_id_attr, tree_id)
            setattr(node, self.parent_attr, None)
        elif target.is_root_node() and position in ['left', 'right']:
            # Sibling of a root node: allocate a whole new tree slot next to
            # the target's tree instead of moving within a tree.
            if refresh_target:
                # Ensure mptt values on target are not stale.
                target._mptt_refresh()
            target_tree_id = getattr(target, self.tree_id_attr)
            if position == 'left':
                tree_id = target_tree_id
                space_target = target_tree_id - 1
            else:
                tree_id = target_tree_id + 1
                space_target = target_tree_id
            self._create_tree_space(space_target)
            setattr(node, self.left_attr, 1)
            setattr(node, self.right_attr, 2)
            setattr(node, self.level_attr, 0)
            setattr(node, self.tree_id_attr, tree_id)
            setattr(node, self.parent_attr, None)
        else:
            # General case: insert relative to target inside target's tree.
            # Seed left=0 / level=0 so the deltas returned by
            # _calculate_inter_tree_move_values are exactly the negated final
            # values (e.g. with left == 0, -left == space_target + 1).
            setattr(node, self.left_attr, 0)
            setattr(node, self.level_attr, 0)
            if refresh_target:
                # Ensure mptt values on target are not stale.
                target._mptt_refresh()
            space_target, level, left, parent, right_shift = \
                self._calculate_inter_tree_move_values(node, target, position)
            tree_id = getattr(target, self.tree_id_attr)
            # Open a 2-wide gap at the insertion point, then drop the node in.
            self._create_space(2, space_target, tree_id)
            setattr(node, self.left_attr, -left)
            setattr(node, self.right_attr, -left + 1)
            setattr(node, self.level_attr, -level)
            setattr(node, self.tree_id_attr, tree_id)
            setattr(node, self.parent_attr, parent)
            if parent:
                # Keep the cached parent chain's right values in sync.
                self._post_insert_update_cached_parent_right(parent, right_shift)
        if save:
            node.save()
        return node
def _move_node(self, node, target, position='last-child', save=True, refresh_target=True):
if self._base_manager:
return self._base_manager._move_node(node, target, position=position,
save=save, refresh_target=refresh_target)
if self.tree_model._mptt_is_tracking:
# delegate to insert_node and clean up the gaps later.
return self.insert_node(node, target, position=position, save=save,
allow_existing_pk=True, refresh_target=refresh_target)
else:
if target is None:
if node.is_child_node():
self._make_child_root_node(node)
elif target.is_root_node() and position in ('left', 'right'):
self._make_sibling_of_root_node(node, target, position)
else:
if node.is_root_node():
self._move_root_node(node, target, position)
else:
self._move_child_node(node, target, position)
def move_node(self, node, target, position='last-child'):
"""
Moves ``node`` relative to a given ``target`` node as specified
by ``position`` (when appropriate), by examining both nodes and
calling the appropriate method to perform the move.
A ``target`` of ``None`` indicates that ``node`` should be
turned into a root node.
Valid values for ``position`` are ``'first-child'``,
``'last-child'``, ``'left'`` or ``'right'``.
``node`` will be modified to reflect its new tree state in the
database.
This method explicitly checks for ``node`` being made a sibling
of a root node, as this is a special case due to our use of tree
ids to order root nodes.
NOTE: This is a low-level method; it does NOT respect
``MPTTMeta.order_insertion_by``. In most cases you should just
move the node yourself by setting node.parent.
"""
self._move_node(node, target, position=position)
node_moved.send(sender=node.__class__, instance=node,
target=target, position=position)
def root_node(self, tree_id):
"""
Returns the root node of the tree with the given id.
"""
if self._base_manager:
return self._base_manager.root_node(tree_id)
return self._mptt_filter(tree_id=tree_id, parent=None).get()
def root_nodes(self):
"""
Creates a ``QuerySet`` containing root nodes.
"""
if self._base_manager:
return self._base_manager.root_nodes()
return self._mptt_filter(parent=None)
def rebuild(self):
"""
Rebuilds all trees in the database table using `parent` link.
"""
if self._base_manager:
return self._base_manager.rebuild()
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
idx = 0
for pk in pks:
idx += 1
rebuild_helper(pk, 1, idx)
rebuild.alters_data = True
def partial_rebuild(self, tree_id):
"""
Partially rebuilds a tree i.e. It rebuilds only the tree with given
``tree_id`` in database table using ``parent`` link.
"""
if self._base_manager:
return self._base_manager.partial_rebuild(tree_id)
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None, tree_id=tree_id)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
if not pks:
return
if len(pks) > 1:
raise RuntimeError(
"More than one root node with tree_id %d. That's invalid,"
" do a full rebuild." % tree_id)
self._rebuild_helper(pks[0], 1, tree_id)
def _rebuild_helper(self, pk, left, tree_id, level=0):
opts = self.model._mptt_meta
right = left + 1
qs = self._mptt_filter(parent__pk=pk)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
child_ids = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
for child_id in child_ids:
right = rebuild_helper(child_id, right, tree_id, level + 1)
qs = self.model._default_manager.filter(pk=pk)
self._mptt_update(
qs,
left=left,
right=right,
level=level,
tree_id=tree_id
)
return right + 1
def _post_insert_update_cached_parent_right(self, instance, right_shift, seen=None):
setattr(instance, self.right_attr, getattr(instance, self.right_attr) + right_shift)
attr = '_%s_cache' % self.parent_attr
if hasattr(instance, attr):
parent = getattr(instance, attr)
if parent:
if not seen:
seen = set()
seen.add(instance)
if parent in seen:
# detect infinite recursion and throw an error
raise InvalidMove
self._post_insert_update_cached_parent_right(parent, right_shift, seen=seen)
def _calculate_inter_tree_move_values(self, node, target, position):
"""
Calculates values required when moving ``node`` relative to
``target`` as specified by ``position``.
"""
left = getattr(node, self.left_attr)
level = getattr(node, self.level_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if position == 'last-child':
space_target = target_right - 1
else:
space_target = target_left
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if position == 'left':
space_target = target_left - 1
else:
space_target = target_right
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_right_change = left - space_target - 1
right_shift = 0
if parent:
right_shift = 2 * (node.get_descendant_count() + 1)
return space_target, level_change, left_right_change, parent, right_shift
def _close_gap(self, size, target, tree_id):
"""
Closes a gap of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(-size, target, tree_id)
def _create_space(self, size, target, tree_id):
"""
Creates a space of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(size, target, tree_id)
def _create_tree_space(self, target_tree_id, num_trees=1):
"""
Creates space for a new tree by incrementing all tree ids
greater than ``target_tree_id``.
"""
qs = self._mptt_filter(tree_id__gt=target_tree_id)
self._mptt_update(qs, tree_id=F(self.tree_id_attr) + num_trees)
self.tree_model._mptt_track_tree_insertions(target_tree_id + 1, num_trees)
def _get_next_tree_id(self):
"""
Determines the next largest unused tree id for the tree managed
by this manager.
"""
max_tree_id = list(self.aggregate(Max(self.tree_id_attr)).values())[0]
max_tree_id = max_tree_id or 0
return max_tree_id + 1
    def _inter_tree_move_and_close_gap(
            self, node, level_change,
            left_right_change, new_tree_id, parent_pk=None):
        """
        Removes ``node`` from its current tree, with the given set of
        changes being applied to ``node`` and its descendants, closing
        the gap left by moving ``node`` as it does so.

        If ``parent_pk`` is ``None``, this indicates that ``node`` is
        being moved to a brand new tree as its root node, and will thus
        have its parent field set to ``NULL``. Otherwise, ``node`` will
        have ``parent_pk`` set for its parent field.
        """
        connection = self._get_connection(instance=node)
        qn = connection.ops.quote_name

        opts = self.model._meta
        # One UPDATE over the whole source tree: rows inside the moved
        # subtree receive new level/tree_id/left/right values; rows to the
        # right of the subtree have the gap it leaves closed; the moved
        # node itself gets its parent re-pointed.
        inter_tree_move_query = """
        UPDATE %(table)s
        SET %(level)s = CASE
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                    THEN %(level)s - %%s
                ELSE %(level)s END,
            %(tree_id)s = CASE
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                    THEN %%s
                ELSE %(tree_id)s END,
            %(left)s = CASE
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                    THEN %(left)s - %%s
                WHEN %(left)s > %%s
                    THEN %(left)s - %%s
                ELSE %(left)s END,
            %(right)s = CASE
                WHEN %(right)s >= %%s AND %(right)s <= %%s
                    THEN %(right)s - %%s
                WHEN %(right)s > %%s
                    THEN %(right)s - %%s
                ELSE %(right)s END,
            %(parent)s = CASE
                WHEN %(pk)s = %%s
                    THEN %(new_parent)s
                ELSE %(parent)s END
        WHERE %(tree_id)s = %%s""" % {
            'table': qn(self.tree_model._meta.db_table),
            'level': qn(opts.get_field(self.level_attr).column),
            'left': qn(opts.get_field(self.left_attr).column),
            'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            'right': qn(opts.get_field(self.right_attr).column),
            'parent': qn(opts.get_field(self.parent_attr).column),
            'pk': qn(opts.pk.column),
            # NULL is inlined into the SQL; a concrete parent pk is passed
            # as an extra query parameter (see params.insert below).
            'new_parent': parent_pk is None and 'NULL' or '%s',
        }

        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        # Width of the subtree being removed; the gap it leaves begins
        # just left of the subtree.
        gap_size = right - left + 1
        gap_target_left = left - 1
        # Positional parameters - order must match the CASE placeholders
        # in the query above exactly.
        params = [
            left, right, level_change,        # level CASE
            left, right, new_tree_id,         # tree_id CASE
            left, right, left_right_change,   # left CASE (inside subtree)
            gap_target_left, gap_size,        # left CASE (close the gap)
            left, right, left_right_change,   # right CASE (inside subtree)
            gap_target_left, gap_size,        # right CASE (close the gap)
            node.pk,                          # parent CASE
            getattr(node, self.tree_id_attr)  # WHERE tree_id
        ]
        if parent_pk is not None:
            # The '%s' placeholder for the new parent value sits just
            # before the final WHERE parameter.
            params.insert(-1, parent_pk)

        cursor = connection.cursor()
        cursor.execute(inter_tree_move_query, params)
def _make_child_root_node(self, node, new_tree_id=None):
"""
Removes ``node`` from its tree, making it the root node of a new
tree.
If ``new_tree_id`` is not specified a new tree id will be
generated.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
if not new_tree_id:
new_tree_id = self._get_next_tree_id()
left_right_change = left - 1
self._inter_tree_move_and_close_gap(node, level, left_right_change, new_tree_id)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, None)
node._mptt_cached_fields[self.parent_attr] = None
    def _make_sibling_of_root_node(self, node, target, position):
        """
        Moves ``node``, making it a sibling of the given ``target`` root
        node as specified by ``position``.

        ``node`` will be modified to reflect its new tree state in the
        database.

        Since we use tree ids to reduce the number of rows affected by
        tree mangement during insertion and deletion, root nodes are not
        true siblings; thus, making an item a sibling of a root node is
        a special case which involves shuffling tree ids around.
        """
        if node == target:
            raise InvalidMove(_('A node may not be made a sibling of itself.'))

        opts = self.model._meta
        tree_id = getattr(node, self.tree_id_attr)
        target_tree_id = getattr(target, self.tree_id_attr)

        if node.is_child_node():
            # Case 1: a non-root node becomes a root.  Open a slot in the
            # tree-id sequence next to ``target`` and promote the node
            # into it.
            if position == 'left':
                space_target = target_tree_id - 1
                new_tree_id = target_tree_id
            elif position == 'right':
                space_target = target_tree_id
                new_tree_id = target_tree_id + 1
            else:
                raise ValueError(_('An invalid position was given: %s.') % position)

            self._create_tree_space(space_target)
            if tree_id > space_target:
                # The node's tree id has been incremented in the
                # database - this change must be reflected in the node
                # object for the method call below to operate on the
                # correct tree.
                setattr(node, self.tree_id_attr, tree_id + 1)
            self._make_child_root_node(node, new_tree_id)
        else:
            # Case 2: a root node moves among the other roots.  Work out
            # the destination tree id, the inclusive range of tree ids
            # that must shift, and the direction (``shift``) they move.
            if position == 'left':
                if target_tree_id > tree_id:
                    left_sibling = target.get_previous_sibling()
                    if node == left_sibling:
                        # Already in the requested position - nothing to do.
                        return
                    new_tree_id = getattr(left_sibling, self.tree_id_attr)
                    lower_bound, upper_bound = tree_id, new_tree_id
                    shift = -1
                else:
                    new_tree_id = target_tree_id
                    lower_bound, upper_bound = new_tree_id, tree_id
                    shift = 1
            elif position == 'right':
                if target_tree_id > tree_id:
                    new_tree_id = target_tree_id
                    lower_bound, upper_bound = tree_id, target_tree_id
                    shift = -1
                else:
                    right_sibling = target.get_next_sibling()
                    if node == right_sibling:
                        # Already in the requested position - nothing to do.
                        return
                    new_tree_id = getattr(right_sibling, self.tree_id_attr)
                    lower_bound, upper_bound = new_tree_id, tree_id
                    shift = 1
            else:
                raise ValueError(_('An invalid position was given: %s.') % position)

            connection = self._get_connection(instance=node)
            qn = connection.ops.quote_name

            # Rotate tree ids in [lower_bound, upper_bound]: the moved
            # tree gets ``new_tree_id``; every other tree in the range
            # shifts by ``shift`` to make room.
            root_sibling_query = """
            UPDATE %(table)s
            SET %(tree_id)s = CASE
                    WHEN %(tree_id)s = %%s
                        THEN %%s
                    ELSE %(tree_id)s + %%s END
            WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s""" % {
                'table': qn(self.tree_model._meta.db_table),
                'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            }

            cursor = connection.cursor()
            cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift,
                                                lower_bound, upper_bound])
            setattr(node, self.tree_id_attr, new_tree_id)
    def _manage_space(self, size, target, tree_id):
        """
        Manages spaces in the tree identified by ``tree_id`` by changing
        the values of the left and right columns by ``size`` after the
        given ``target`` point.

        A positive ``size`` opens a gap; a negative ``size`` closes one.
        """
        if self.tree_model._mptt_is_tracking:
            # Deferred mode: only record that the tree changed; the
            # actual adjustment happens when tracking is flushed.
            self.tree_model._mptt_track_tree_modified(tree_id)
        else:
            connection = self._get_connection()
            qn = connection.ops.quote_name

            opts = self.model._meta
            # Shift every left/right value greater than ``target`` by
            # ``size`` in a single UPDATE over the affected tree.
            space_query = """
            UPDATE %(table)s
            SET %(left)s = CASE
                    WHEN %(left)s > %%s
                        THEN %(left)s + %%s
                    ELSE %(left)s END,
                %(right)s = CASE
                    WHEN %(right)s > %%s
                        THEN %(right)s + %%s
                    ELSE %(right)s END
            WHERE %(tree_id)s = %%s
              AND (%(left)s > %%s OR %(right)s > %%s)""" % {
                'table': qn(self.tree_model._meta.db_table),
                'left': qn(opts.get_field(self.left_attr).column),
                'right': qn(opts.get_field(self.right_attr).column),
                'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            }
            cursor = connection.cursor()
            cursor.execute(space_query, [target, size, target, size, tree_id,
                                         target, target])
def _move_child_node(self, node, target, position):
"""
Calls the appropriate method to move child node ``node``
relative to the given ``target`` node as specified by
``position``.
"""
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if tree_id == target_tree_id:
self._move_child_within_tree(node, target, position)
else:
self._move_child_to_new_tree(node, target, position)
def _move_child_to_new_tree(self, node, target, position):
"""
Moves child node ``node`` to a different tree, inserting it
relative to the given ``target`` node in the new tree as
specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
new_tree_id = getattr(target, self.tree_id_attr)
space_target, level_change, left_right_change, parent, new_parent_right = \
self._calculate_inter_tree_move_values(node, target, position)
tree_width = right - left + 1
# Make space for the subtree which will be moved
self._create_space(tree_width, space_target, new_tree_id)
# Move the subtree
self._inter_tree_move_and_close_gap(
node, level_change, left_right_change, new_tree_id, parent.pk)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
    def _move_child_within_tree(self, node, target, position):
        """
        Moves child node ``node`` within its current tree relative to
        the given ``target`` node as specified by ``position``.

        ``node`` will be modified to reflect its new tree state in the
        database.

        Raises ``InvalidMove`` if the move would make ``node`` a child or
        sibling of itself or of one of its own descendants; raises
        ``ValueError`` for an unknown ``position``.
        """
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        # Width of the subtree being moved (number of left/right slots).
        width = right - left + 1
        tree_id = getattr(node, self.tree_id_attr)
        target_left = getattr(target, self.left_attr)
        target_right = getattr(target, self.right_attr)
        target_level = getattr(target, self.level_attr)

        if position == 'last-child' or position == 'first-child':
            if node == target:
                raise InvalidMove(_('A node may not be made a child of itself.'))
            elif left < target_left < right:
                # ``target`` lies inside node's own subtree.
                raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
            if position == 'last-child':
                # Direction of the move (left vs right) decides which side
                # of the target boundary the new slot sits on.
                if target_right > right:
                    new_left = target_right - width
                    new_right = target_right - 1
                else:
                    new_left = target_right
                    new_right = target_right + width - 1
            else:
                if target_left > left:
                    new_left = target_left - width + 1
                    new_right = target_left
                else:
                    new_left = target_left + 1
                    new_right = target_left + width
            level_change = level - target_level - 1
            parent = target
        elif position == 'left' or position == 'right':
            if node == target:
                raise InvalidMove(_('A node may not be made a sibling of itself.'))
            elif left < target_left < right:
                # ``target`` lies inside node's own subtree.
                raise InvalidMove(_('A node may not be made a sibling of any of its descendants.'))
            if position == 'left':
                if target_left > left:
                    new_left = target_left - width
                    new_right = target_left - 1
                else:
                    new_left = target_left
                    new_right = target_left + width - 1
            else:
                if target_right > right:
                    new_left = target_right - width + 1
                    new_right = target_right
                else:
                    new_left = target_right + 1
                    new_right = target_right + width
            level_change = level - target_level
            parent = getattr(target, self.parent_attr)
        else:
            raise ValueError(_('An invalid position was given: %s.') % position)

        # The span of rows affected and the amount the in-between rows
        # shift to absorb/close the moved subtree's slots.
        left_boundary = min(left, new_left)
        right_boundary = max(right, new_right)
        left_right_change = new_left - left
        gap_size = width
        if left_right_change > 0:
            gap_size = -gap_size

        connection = self._get_connection(instance=node)
        qn = connection.ops.quote_name

        opts = self.model._meta
        # The level update must come before the left update to keep
        # MySQL happy - left seems to refer to the updated value
        # immediately after its update has been specified in the query
        # with MySQL, but not with SQLite or Postgres.
        move_subtree_query = """
        UPDATE %(table)s
        SET %(level)s = CASE
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                    THEN %(level)s - %%s
                ELSE %(level)s END,
            %(left)s = CASE
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                    THEN %(left)s + %%s
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                    THEN %(left)s + %%s
                ELSE %(left)s END,
            %(right)s = CASE
                WHEN %(right)s >= %%s AND %(right)s <= %%s
                    THEN %(right)s + %%s
                WHEN %(right)s >= %%s AND %(right)s <= %%s
                    THEN %(right)s + %%s
                ELSE %(right)s END,
            %(parent)s = CASE
                WHEN %(pk)s = %%s
                    THEN %%s
                ELSE %(parent)s END
        WHERE %(tree_id)s = %%s""" % {
            'table': qn(self.tree_model._meta.db_table),
            'level': qn(opts.get_field(self.level_attr).column),
            'left': qn(opts.get_field(self.left_attr).column),
            'right': qn(opts.get_field(self.right_attr).column),
            'parent': qn(opts.get_field(self.parent_attr).column),
            'pk': qn(opts.pk.column),
            'tree_id': qn(opts.get_field(self.tree_id_attr).column),
        }

        cursor = connection.cursor()
        # Positional parameters - order must match the CASE placeholders
        # in the query above exactly.
        cursor.execute(move_subtree_query, [
            left, right, level_change,                  # level CASE
            left, right, left_right_change,             # left CASE (moved subtree)
            left_boundary, right_boundary, gap_size,    # left CASE (rows in between)
            left, right, left_right_change,             # right CASE (moved subtree)
            left_boundary, right_boundary, gap_size,    # right CASE (rows in between)
            node.pk, parent.pk,                         # parent CASE
            tree_id])                                   # WHERE tree_id

        # Update the node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, new_left)
        setattr(node, self.right_attr, new_right)
        setattr(node, self.level_attr, level - level_change)
        setattr(node, self.parent_attr, parent)
        node._mptt_cached_fields[self.parent_attr] = parent.pk
    def _move_root_node(self, node, target, position):
        """
        Moves root node``node`` to a different tree, inserting it
        relative to the given ``target`` node as specified by
        ``position``.

        ``node`` will be modified to reflect its new tree state in the
        database.

        Raises ``InvalidMove`` when the move would make ``node`` a child
        of itself or of one of its own descendants.
        """
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        tree_id = getattr(node, self.tree_id_attr)
        new_tree_id = getattr(target, self.tree_id_attr)
        # Width of the tree being moved (number of left/right slots).
        width = right - left + 1

        if node == target:
            raise InvalidMove(_('A node may not be made a child of itself.'))
        elif tree_id == new_tree_id:
            # ``target`` is in node's own tree, i.e. one of its descendants
            # (``node`` is the root of that tree).
            raise InvalidMove(_('A node may not be made a child of any of its descendants.'))

        space_target, level_change, left_right_change, parent, right_shift = \
            self._calculate_inter_tree_move_values(node, target, position)

        # Create space for the tree which will be inserted
        self._create_space(width, space_target, new_tree_id)

        # Move the root node, making it a child node
        connection = self._get_connection(instance=node)
        qn = connection.ops.quote_name

        opts = self.model._meta
        # The whole old tree is moved at once: shift level/left/right,
        # retag with the destination tree id, and re-point the (former)
        # root's parent field.
        move_tree_query = """
        UPDATE %(table)s
        SET %(level)s = %(level)s - %%s,
            %(left)s = %(left)s - %%s,
            %(right)s = %(right)s - %%s,
            %(tree_id)s = %%s,
            %(parent)s = CASE
                WHEN %(pk)s = %%s
                    THEN %%s
                ELSE %(parent)s END
        WHERE %(left)s >= %%s AND %(left)s <= %%s
          AND %(tree_id)s = %%s""" % {
            'table': qn(self.tree_model._meta.db_table),
            'level': qn(opts.get_field(self.level_attr).column),
            'left': qn(opts.get_field(self.left_attr).column),
            'right': qn(opts.get_field(self.right_attr).column),
            'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            'parent': qn(opts.get_field(self.parent_attr).column),
            'pk': qn(opts.pk.column),
        }

        cursor = connection.cursor()
        cursor.execute(move_tree_query, [
            level_change, left_right_change, left_right_change,
            new_tree_id, node.pk, parent.pk, left, right, tree_id])

        # Update the former root node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, left - left_right_change)
        setattr(node, self.right_attr, right - left_right_change)
        setattr(node, self.level_attr, level - level_change)
        setattr(node, self.tree_id_attr, new_tree_id)
        setattr(node, self.parent_attr, parent)
        node._mptt_cached_fields[self.parent_attr] = parent.pk
| |
"""This module contains tests which are supposed to run on both root Marathon and Marathon on Marathon (MoM)."""
import apps
import common
import groups
import os
import os.path
import pytest
import retrying
import scripts
import shakedown
import time
from datetime import timedelta
from dcos import http, marathon
from shakedown import dcos_version_less_than, marthon_version_less_than, required_private_agents # NOQA
def test_launch_mesos_container():
    """Launches a Mesos container with a simple command."""
    app_def = apps.mesos_app()
    app_id = app_def["id"]

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    tasks = client.get_tasks(app_id)
    deployed_app = client.get_app(app_id)

    task_count = len(tasks)
    assert task_count == 1, "The number of tasks is {} after deployment, but only 1 was expected".format(task_count)
    assert deployed_app['container']['type'] == 'MESOS', "The container type is not MESOS"
def test_launch_docker_container():
    """Launches a Docker container on Marathon."""
    app_def = apps.docker_http_server()

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_def["id"])

    deployed_app = client.get_app(app_def["id"])
    tasks = client.get_tasks(app_def["id"])

    task_count = len(tasks)
    assert task_count == 1, "The number of tasks is {} after deployment, but only 1 was expected".format(task_count)
    assert deployed_app['container']['type'] == 'DOCKER', "The container type is not DOCKER"
def test_launch_mesos_container_with_docker_image():
    """Launches a Mesos container with a Docker image."""
    app_def = apps.ucr_docker_http_server()

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_def["id"])

    deployed_app = client.get_app(app_def["id"])
    tasks = client.get_tasks(app_def["id"])

    task_count = len(tasks)
    assert task_count == 1, "The number of tasks is {} after deployment, but only 1 was expected".format(task_count)
    assert deployed_app['container']['type'] == 'MESOS', "The container type is not MESOS"
# This fails on DC/OS 1.7, it is likely the version of Marathon in Universe for 1.7, is 1.1.5.
@shakedown.dcos_1_8
def test_launch_mesos_grace_period(marathon_service_name):
    """Tests 'taskKillGracePeriodSeconds' option using a Mesos container in a Marathon environment.

    Read more details about this test in `test_root_marathon.py::test_launch_mesos_root_marathon_grace_period`
    """
    default_grace_period = 3
    grace_period = 20

    app_def = apps.mesos_app()
    app_def['fetch'] = [{"uri": "https://downloads.mesosphere.com/testing/test.py"}]
    app_def['cmd'] = '/opt/mesosphere/bin/python test.py'
    app_def['taskKillGracePeriodSeconds'] = grace_period
    app_id = app_def['id'].lstrip('/')

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_id)

    assert shakedown.get_service_task(marathon_service_name, app_id) is not None

    client.scale_app(app_id, 0)
    assert shakedown.get_service_task(marathon_service_name, app_id) is not None

    # The task should survive past the default grace period...
    time.sleep(default_grace_period + 1)
    assert shakedown.get_service_task(marathon_service_name, app_id) is not None

    # ...but be gone once the configured grace period has elapsed.
    time.sleep(grace_period)
    assert shakedown.get_service_task(marathon_service_name, app_id) is None
def test_launch_docker_grace_period(marathon_service_name):
    """Tests 'taskKillGracePeriodSeconds' option using a Docker container in a Marathon environment.

    Read more details about this test in `test_root_marathon.py::test_launch_mesos_root_marathon_grace_period`
    """
    default_grace_period = 3
    grace_period = 20

    app_def = apps.docker_http_server()
    app_def['container']['docker']['image'] = 'kensipe/python-test'
    app_def['taskKillGracePeriodSeconds'] = grace_period
    app_def['cmd'] = 'python test.py'
    app_id = app_def['id'].lstrip('/')

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_id)

    assert shakedown.get_service_task(marathon_service_name, app_id) is not None

    client.scale_app(app_id, 0)
    assert shakedown.get_service_task(marathon_service_name, app_id) is not None

    # The task should survive past the default grace period...
    time.sleep(default_grace_period + 1)
    assert shakedown.get_service_task(marathon_service_name, app_id) is not None

    # ...but be gone once the configured grace period has elapsed.
    time.sleep(grace_period)
    assert shakedown.get_service_task(marathon_service_name, app_id) is None
def test_docker_port_mappings():
    """Tests that Docker ports are mapped and are accessible from the host."""
    app_def = apps.docker_http_server()

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_def["id"])

    first_task = client.get_tasks(app_def["id"])[0]
    host = first_task['host']
    port = first_task['ports'][0]

    # Fetch a file that only exists inside a Docker container.
    cmd = r'curl -s -w "%{http_code}"' + ' {}:{}/.dockerenv'.format(host, port)
    status, output = shakedown.run_command_on_agent(host, cmd)

    assert status
    assert output == "200", "HTTP status code is {}, but 200 was expected".format(output)
def test_docker_dns_mapping(marathon_service_name):
    """Tests that a running Docker task is accessible via DNS."""
    app_def = apps.docker_http_server()

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_def["id"])

    # A bogus DNS name must not resolve.
    bad_status, _ = shakedown.run_command_on_master('ping -c 1 docker-test.marathon-user.mesos-bad')
    assert not bad_status

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_dns():
        dnsname = '{}.{}.mesos'.format(app_def["id"].lstrip('/'), marathon_service_name)
        shakedown.wait_for_dns(dnsname)
        status, output = shakedown.run_command_on_master('ping -c 1 {}'.format(dnsname))
        assert status, "ping failed for app using DNS lookup: {}".format(dnsname)

    check_dns()
def test_launch_app_timed():
    """Most tests wait until a task is launched with no reference to time.
    This test verifies that if a app is launched on marathon that within 3 secs there is a task spawned.
    """
    app_def = apps.mesos_app()

    client = marathon.create_client()
    client.add_app(app_def)

    # if not launched in 3 sec fail
    time.sleep(3)

    task_count = len(client.get_tasks(app_def["id"]))
    assert task_count == 1, "The number of tasks is {} after deployment, but 1 was expected".format(task_count)
def test_ui_available(marathon_service_name):
    """Simply verifies that a request to the UI endpoint is successful if Marathon is launched."""
    ui_url = "{}/ui/".format(shakedown.dcos_service_url(marathon_service_name))
    response = http.get(ui_url)
    assert response.status_code == 200, "HTTP status code is {}, but 200 was expected".format(response.status_code)
def test_task_failure_recovers():
    """Tests that if a task is KILLED, another one will be launched with a different ID."""
    app_def = apps.sleep_app()
    app_def['cmd'] = 'sleep 1000'

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_def["id"])

    original_task = client.get_tasks(app_def["id"])[0]
    old_task_id = original_task['id']

    # Kill the task process on its agent and wait for the replacement.
    common.kill_process_on_host(original_task['host'], '[s]leep 1000')
    shakedown.deployment_wait()

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_new_task_id():
        new_task_id = client.get_tasks(app_def["id"])[0]['id']
        assert old_task_id != new_task_id, "The task ID has not changed: {}".format(old_task_id)

    check_new_task_id()
@pytest.mark.skipif("shakedown.ee_version() == 'strict'")
def test_run_app_with_specified_user():
    """Runs an app with a given user (core). CoreOS is expected, since it has core user by default."""
    app_def = apps.sleep_app()
    app_def['user'] = 'core'
    app_id = app_def['id']

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_id)

    first_task = client.get_tasks(app_id)[0]
    assert first_task['state'] == 'TASK_RUNNING', "The task is not running: {}".format(first_task['state'])

    app = client.get_app(app_id)
    assert app['user'] == 'core', "The app's user is not core: {}".format(app['user'])
@pytest.mark.skipif("shakedown.ee_version() == 'strict'")
def test_run_app_with_non_existing_user():
    """Runs an app with a non-existing user, which should be failing."""
    app_def = apps.sleep_app()
    app_def['user'] = 'bad'

    client = marathon.create_client()
    client.add_app(app_def)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_failure_message():
        app = client.get_app(app_def["id"])
        message = app['lastTaskFailure']['message']
        error = "No such user 'bad'"
        # Fixed typo in the assertion text: "messsage" -> "message".
        assert error in message, f"Did not receive expected error message \"{error}\" but \"{message}\""

    check_failure_message()
def test_run_app_with_non_downloadable_artifact():
    """Runs an app with a non-downloadable artifact."""
    app_def = apps.sleep_app()
    app_def['fetch'] = [{"uri": "http://localhost/missing-artifact"}]

    client = marathon.create_client()
    client.add_app(app_def)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_failure_message():
        failure = client.get_app(app_def["id"])['lastTaskFailure']
        error = "Failed to fetch all URIs for container"
        assert error in failure['message'], "Launched an app with a non-downloadable artifact"

    check_failure_message()
def test_launch_group():
    """Launches a group of 2 apps."""
    group_def = groups.sleep_group()
    groups_id = group_def["groups"][0]["id"]

    client = marathon.create_client()
    client.create_group(group_def)
    shakedown.deployment_wait()

    group_apps = client.get_group(groups_id)
    # Renamed from `apps` so the imported `apps` module is not shadowed.
    app_list = group_apps['apps']
    # Assertion message also fixed ("The numbers of apps is" -> "The number
    # of apps is ... was expected") for consistency with the sibling tests.
    assert len(app_list) == 2, "The number of apps is {} after deployment, but 2 was expected".format(len(app_list))
@shakedown.private_agents(2)
def test_launch_and_scale_group():
    """Launches and scales a group."""
    group_def = groups.sleep_group()
    groups_id = group_def["groups"][0]["id"]

    client = marathon.create_client()
    client.create_group(group_def)
    shakedown.deployment_wait()

    group_apps = client.get_group(groups_id)
    # Renamed from `apps` so the imported `apps` module is not shadowed.
    app_list = group_apps['apps']
    assert len(app_list) == 2, "The number of apps is {}, but 2 was expected".format(len(app_list))

    app1_id = group_def["groups"][0]["apps"][0]["id"]
    app2_id = group_def["groups"][0]["apps"][1]["id"]
    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 1, "The number of tasks #1 is {} after deployment, but 1 was expected".format(len(tasks1))
    assert len(tasks2) == 1, "The number of tasks #2 is {} after deployment, but 1 was expected".format(len(tasks2))

    # scale by 2 for the entire group
    client.scale_group(groups_id, 2)
    shakedown.deployment_wait()

    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 2, "The number of tasks #1 is {} after scale, but 2 was expected".format(len(tasks1))
    assert len(tasks2) == 2, "The number of tasks #2 is {} after scale, but 2 was expected".format(len(tasks2))
@shakedown.private_agents(2)
def test_scale_app_in_group():
    """Scales an individual app in a group."""
    group_def = groups.sleep_group()
    groups_id = group_def["groups"][0]["id"]

    client = marathon.create_client()
    client.create_group(group_def)
    shakedown.deployment_wait()

    group_apps = client.get_group(groups_id)
    # Renamed from `apps` so the imported `apps` module is not shadowed.
    app_list = group_apps['apps']
    assert len(app_list) == 2, "The number of apps is {}, but 2 was expected".format(len(app_list))

    app1_id = group_def["groups"][0]["apps"][0]["id"]
    app2_id = group_def["groups"][0]["apps"][1]["id"]
    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 1, "The number of tasks #1 is {} after deployment, but 1 was expected".format(len(tasks1))
    assert len(tasks2) == 1, "The number of tasks #2 is {} after deployment, but 1 was expected".format(len(tasks2))

    # scaling just one app in the group
    client.scale_app(app1_id, 2)
    shakedown.deployment_wait()

    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 2, "The number of tasks #1 is {} after scale, but 2 was expected".format(len(tasks1))
    assert len(tasks2) == 1, "The number of tasks #2 is {} after scale, but 1 was expected".format(len(tasks2))
@shakedown.private_agents(2)
def test_scale_app_in_group_then_group():
    """First scales an app in a group, then scales the group itself."""
    group_def = groups.sleep_group()
    groups_id = group_def["groups"][0]["id"]

    client = marathon.create_client()
    client.create_group(group_def)
    shakedown.deployment_wait()

    group_apps = client.get_group(groups_id)
    # Renamed from `apps` so the imported `apps` module is not shadowed.
    app_list = group_apps['apps']
    assert len(app_list) == 2, "The number of apps is {}, but 2 was expected".format(len(app_list))

    app1_id = group_def["groups"][0]["apps"][0]["id"]
    app2_id = group_def["groups"][0]["apps"][1]["id"]
    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 1, "The number of tasks #1 is {} after deployment, but 1 was expected".format(len(tasks1))
    assert len(tasks2) == 1, "The number of tasks #2 is {} after deployment, but 1 was expected".format(len(tasks2))

    # scaling just one app in the group
    client.scale_app(app1_id, 2)
    shakedown.deployment_wait()

    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 2, "The number of tasks #1 is {} after scale, but 2 was expected".format(len(tasks1))
    assert len(tasks2) == 1, "The number of tasks #2 is {} after scale, but 1 was expected".format(len(tasks2))
    shakedown.deployment_wait()

    # scaling the group after one app in the group was scaled
    client.scale_group(groups_id, 2)
    shakedown.deployment_wait()

    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 4, "The number of tasks #1 is {} after scale, but 4 was expected".format(len(tasks1))
    assert len(tasks2) == 2, "The number of tasks #2 is {} after scale, but 2 was expected".format(len(tasks2))
def assert_app_healthy(client, app_def, health_check):
    """Deploys ``app_def`` with ``health_check`` attached and asserts that
    every instance ends up both running and healthy."""
    app_def['healthChecks'] = [health_check]
    instances = app_def['instances']

    print('Testing {} health check protocol.'.format(health_check['protocol']))
    client.add_app(app_def)
    shakedown.deployment_wait(timeout=timedelta(minutes=5).total_seconds())

    app = client.get_app(app_def["id"])
    running = app['tasksRunning']
    healthy = app['tasksHealthy']
    assert running == instances, \
        "The number of running tasks is {}, but {} was expected".format(running, instances)
    assert healthy == instances, \
        "The number of healthy tasks is {}, but {} was expected".format(healthy, instances)
@shakedown.dcos_1_9
@pytest.mark.parametrize('protocol', ['HTTP', 'MESOS_HTTP', 'TCP', 'MESOS_TCP'])
def test_http_health_check_healthy(protocol):
    """Tests HTTP, MESOS_HTTP, TCP and MESOS_TCP health checks against a web-server in Python."""
    client = marathon.create_client()
    app_def = apps.http_server()
    assert_app_healthy(client, app_def, common.health_check(protocol=protocol))
def test_app_with_no_health_check_not_healthy():
    """An app that defines no health check must report zero healthy tasks."""
    app_def = apps.sleep_app()
    app_id = app_def["id"]

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_id)

    app = client.get_app(app_id)
    running = app['tasksRunning']
    healthy = app['tasksHealthy']
    assert running == 1, \
        "The number of running tasks is {}, but 1 was expected".format(running)
    assert healthy == 0, \
        "The number of healthy tasks is {}, but 0 was expected".format(healthy)
def test_command_health_check_healthy():
    """Verifies that a COMMAND health check reports the app healthy."""
    client = marathon.create_client()
    sleeper = apps.sleep_app()
    assert_app_healthy(client, sleeper, common.command_health_check())
@shakedown.dcos_1_9
@pytest.mark.parametrize('protocol', ['HTTPS', 'MESOS_HTTPS'])
def test_https_health_check_healthy(protocol):
    """Runs HTTPS and MESOS_HTTPS health checks against a prepared nginx
    image that serves SSL (self-signed certificate) on port 443.
    """
    # Version gating done at call time; works for both root Marathon and MoM.
    requires_marathon_version('1.4.2')

    nginx = apps.docker_nginx_ssl()
    client = marathon.create_client()
    assert_app_healthy(client, nginx, common.health_check(protocol=protocol, port_index=1))
def test_failing_health_check_results_in_unhealthy_app():
    """Deploys an app whose health check can never pass and verifies that it
    is eventually reported running but unhealthy."""
    app_def = apps.http_server()
    app_def['healthChecks'] = [common.health_check('/bad-url', 'HTTP', failures=0, timeout=3)]

    client = marathon.create_client()
    client.add_app(app_def)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_failure_message():
        app = client.get_app(app_def["id"])
        running = app['tasksRunning']
        healthy = app['tasksHealthy']
        unhealthy = app['tasksUnhealthy']
        print("{}, {}, {}".format(running, healthy, unhealthy))
        assert running == 1, \
            "The number of running tasks is {}, but 1 was expected".format(running)
        assert healthy == 0, \
            "The number of healthy tasks is {}, but 0 was expected".format(healthy)
        assert unhealthy == 1, \
            "The number of unhealthy tasks is {}, but 1 was expected".format(unhealthy)

    check_failure_message()
@shakedown.private_agents(2)
def test_task_gets_restarted_due_to_network_split():
    """Verifies that a task whose health check fails during a network partition
    gets killed and replaced, and that the replacement becomes healthy again.
    """
    app_def = apps.http_server()
    app_def['healthChecks'] = [common.health_check()]
    common.pin_to_host(app_def, common.ip_other_than_mom())

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    app = client.get_app(app_def["id"])
    assert app['tasksRunning'] == 1, \
        "The number of running tasks is {}, but 1 was expected".format(app['tasksRunning'])
    assert app['tasksHealthy'] == 1, \
        "The number of healthy tasks is {}, but 1 was expected".format(app['tasksHealthy'])

    tasks = client.get_tasks(app_def["id"])
    task_id = tasks[0]['id']
    host = tasks[0]['host']
    port = tasks[0]['ports'][0]

    # introduce a network partition on the health-checked port
    common.block_iptable_rules_for_seconds(host, port, sleep_seconds=10, block_input=True, block_output=False)

    shakedown.deployment_wait()

    # The network partition should cause a task restart. The checks are done
    # only inside the retried function: asserting immediately after
    # deployment_wait() (as the original did, with a wrong "0 was expected"
    # message on the healthy-tasks assert) races against task replacement.
    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_health_message():
        tasks = client.get_tasks(app_def["id"])
        new_task_id = tasks[0]['id']
        assert task_id != new_task_id, "The task has not been restarted: {}".format(task_id)

        app = client.get_app(app_def["id"])
        assert app['tasksRunning'] == 1, \
            "The number of running tasks is {}, but 1 was expected".format(app['tasksRunning'])
        assert app['tasksHealthy'] == 1, \
            "The number of healthy tasks is {}, but 1 was expected".format(app['tasksHealthy'])

    check_health_message()
def test_health_check_works_with_resident_task():
    """Checks that resident tasks (commonly used with persistent volumes) pass
    their health checks.

    Regression test for https://jira.mesosphere.com/browse/MARATHON-7050
    """
    app_def = apps.resident_docker_app()

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(timeout=timedelta(minutes=10).total_seconds())

    tasks = client.get_tasks(app_def["id"])
    assert len(tasks) == 1, "The number of tasks is {}, but 1 was expected".format(len(tasks))

    app = client.get_app(app_def["id"])
    healthy = app['tasksHealthy']
    assert healthy == 1, \
        "The number of healthy tasks is {}, but 1 was expected".format(healthy)
@shakedown.private_agents(2)
def test_pinned_task_scales_on_host_only():
    """Ensures that scaling a host-pinned app puts every new task on the pinned node."""
    target_host = common.ip_other_than_mom()
    app_def = apps.sleep_app()
    common.pin_to_host(app_def, target_host)

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    tasks = client.get_tasks(app_def["id"])
    assert len(tasks) == 1, \
        "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))
    assert tasks[0]['host'] == target_host, \
        "The task is on {}, but it is supposed to be on {}".format(tasks[0]['host'], target_host)

    client.scale_app(app_def["id"], 10)
    shakedown.deployment_wait()

    tasks = client.get_tasks(app_def["id"])
    assert len(tasks) == 10, \
        "The number of tasks is {} after scale, but 10 was expected".format(len(tasks))
    for task in tasks:
        assert task['host'] == target_host, \
            "The task is on {}, but it is supposed to be on {}".format(task['host'], target_host)
@shakedown.private_agents(2)
def test_pinned_task_recovers_on_host():
    """After its process is killed, a pinned task must come back on the very
    node it was pinned to."""
    host = common.ip_other_than_mom()
    app_def = apps.sleep_app()
    common.pin_to_host(app_def, host)

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()
    old_tasks = client.get_tasks(app_def["id"])

    common.kill_process_on_host(host, '[s]leep')
    shakedown.deployment_wait()

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_for_new_task():
        new_tasks = client.get_tasks(app_def["id"])
        assert old_tasks[0]['id'] != new_tasks[0]['id'], \
            "The task did not get killed: {}".format(old_tasks[0]['id'])
        assert new_tasks[0]['host'] == host, \
            "The task got restarted on {}, but it was supposed to stay on {}".format(new_tasks[0]['host'], host)

    check_for_new_task()
@shakedown.private_agents(2)
def test_pinned_task_does_not_scale_to_unpinned_host():
    """When a pinned task barely fits on its node, scaling past the node's
    capacity must not launch tasks anywhere else and the deployment must
    stay pending.
    """
    app_def = apps.sleep_app()
    app_id = app_def['id']

    host = common.ip_other_than_mom()
    print('Constraint set to host: {}'.format(host))
    # Claim more than half of the agent's CPUs so that a second task can
    # never fit on the same node.
    cores = common.cpus_on_agent(host)
    app_def['cpus'] = max(0.6, cores - 0.5)
    common.pin_to_host(app_def, host)

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_id)

    client.scale_app(app_id, 2)
    time.sleep(5)

    deployments = client.get_deployments(app_id=app_id)
    tasks = client.get_tasks(app_id)

    # The scale-up must still be stuck in deployment with only the original task.
    assert len(deployments) == 1, \
        "The number of deployments is {}, but 1 was expected".format(len(deployments))
    assert len(tasks) == 1, \
        "The number of tasks is {}, but 1 was expected".format(len(tasks))
@shakedown.private_agents(2)
def test_pinned_task_does_not_find_unknown_host():
    """An app pinned to a non-existent host must never launch a task; after
    10 seconds it should still have zero tasks running.
    """
    app_def = apps.sleep_app()
    common.pin_to_host(app_def, '10.255.255.254')

    client = marathon.create_client()
    client.add_app(app_def)
    # Deployments normally finish within seconds; assume that after 10
    # seconds no offer has matched the impossible constraint.
    time.sleep(10)

    tasks = client.get_tasks(app_def["id"])
    assert len(tasks) == 0, "The number of tasks is {}, 0 was expected".format(len(tasks))
@shakedown.dcos_1_8
def test_restart_container_with_persistent_volume():
    """A task with a persistent volume, which writes to a file in the persistent volume, is launched.
    The app is killed and restarted and we can still read from the persistent volume what was written to it.
    """
    app_def = apps.persistent_volume_app()
    app_id = app_def['id']

    client = marathon.create_client()
    client.add_app(app_def)
    common.deployment_wait(service_id=app_id)

    tasks = client.get_tasks(app_id)
    assert len(tasks) == 1, "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))

    host = tasks[0]['host']
    port = tasks[0]['ports'][0]
    cmd = "curl {}:{}/data/foo".format(host, port)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_task(cmd, target_data):
        # Reads the file in the persistent volume via the app's HTTP endpoint.
        run, data = shakedown.run_command_on_master(cmd)
        assert run, "{} did not succeed".format(cmd)
        assert data == target_data, "'{}' was not equal to {}".format(data, target_data)

    check_task(cmd, target_data='hello\n')

    client.restart_app(app_id)
    common.deployment_wait(service_id=app_id)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_task_recovery():
        tasks = client.get_tasks(app_id)
        assert len(tasks) == 1, "The number of tasks is {} after recovery, but 1 was expected".format(len(tasks))
        # BUG FIX: return the freshly fetched tasks. The original stored them
        # in a local variable only, so the caller kept reading the stale
        # pre-restart host/port for the follow-up curl check.
        return tasks

    tasks = check_task_recovery()

    host = tasks[0]['host']
    port = tasks[0]['ports'][0]
    cmd = "curl {}:{}/data/foo".format(host, port)
    check_task(cmd, target_data='hello\nhello\n')
@shakedown.dcos_1_8
def test_app_with_persistent_volume_recovers():
    """Tests that when an app task with a persistent volume gets killed,
    it recovers on the node it was launched on, and it gets attached
    to the same persistent-volume."""
    app_def = apps.persistent_volume_app()
    app_id = app_def['id']

    client = marathon.create_client()
    client.add_app(app_def)
    common.deployment_wait(service_id=app_id)

    tasks = client.get_tasks(app_id)
    assert len(tasks) == 1, "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))

    task_id = tasks[0]['id']
    port = tasks[0]['ports'][0]
    host = tasks[0]['host']
    cmd = "curl {}:{}/data/foo".format(host, port)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_task(cmd, target_data):
        # Reads the file in the persistent volume via the app's HTTP endpoint.
        run, data = shakedown.run_command_on_master(cmd)
        assert run, "{} did not succeed".format(cmd)
        assert target_data in data, "'{}' not found in {}".format(target_data, data)

    check_task(cmd, target_data='hello\n')

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def kill_task(host, pattern):
        pids = common.kill_process_on_host(host, pattern)
        assert len(pids) != 0, "no task got killed on {} for pattern {}".format(host, pattern)

    kill_task(host, '[h]ttp\\.server')

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_task_recovery():
        tasks = client.get_tasks(app_id)
        assert len(tasks) == 1, "The number of tasks is {} after recovery, but 1 was expected".format(len(tasks))

        new_task_id = tasks[0]['id']
        assert task_id != new_task_id, "The task ID has not changed, and is still {}".format(task_id)
        # BUG FIX: return the freshly fetched tasks. The original stored them
        # in a local variable only, so the caller kept reading the stale
        # pre-kill host/port for the follow-up curl check.
        return tasks

    tasks = check_task_recovery()

    port = tasks[0]['ports'][0]
    host = tasks[0]['host']
    cmd = "curl {}:{}/data/foo".format(host, port)
    check_task(cmd, target_data='hello\nhello\n')
def test_app_update():
    """Updates a running app's resources and instance count and verifies the
    new task count."""
    app_def = apps.mesos_app()

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    running_tasks = client.get_tasks(app_def["id"])
    assert len(running_tasks) == 1, \
        "The number of tasks is {} after deployment, but 1 was expected".format(len(running_tasks))

    app_def['cpus'] = 1
    app_def['instances'] = 2
    client.update_app(app_def["id"], app_def)
    shakedown.deployment_wait()

    running_tasks = client.get_tasks(app_def["id"])
    assert len(running_tasks) == 2, \
        "The number of tasks is {} after deployment, but 2 was expected".format(len(running_tasks))
def test_app_update_rollback():
    """Updates an app and then rolls the update back to the previous version."""
    app_def = apps.readiness_and_health_app()
    app_id = app_def["id"]

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    tasks = client.get_tasks(app_id)
    assert len(tasks) == 1, \
        "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))

    app_def['instances'] = 2
    client.update_app(app_id, app_def)
    shakedown.deployment_wait()

    tasks = client.get_tasks(app_id)
    assert len(tasks) == 2, \
        "The number of tasks is {} after update, but 2 was expected".format(len(tasks))

    # A slow readiness check leaves enough time to roll back mid-deployment.
    app_def['readinessChecks'][0]['intervalSeconds'] = 30
    app_def['instances'] = 1
    deployment_id = client.update_app(app_id, app_def)
    client.rollback_deployment(deployment_id)
    shakedown.deployment_wait()

    # The downscale to 1 instance was rolled back, so 2 instances must remain.
    tasks = client.get_tasks(app_id)
    assert len(tasks) == 2, \
        "The number of tasks is {} after rollback, but 2 was expected".format(len(tasks))
def test_unhealthy_app_can_be_rolled_back():
    """An update that makes the app unhealthy can be rolled back to the
    previous healthy version."""
    app_def = apps.readiness_and_health_app()
    app_id = app_def["id"]

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    tasks = client.get_tasks(app_id)
    assert len(tasks) == 1, \
        "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))

    # Break the health check and try to scale up.
    app_def['healthChecks'][0]['path'] = '/non-existent'
    app_def['instances'] = 2
    deployment_id = client.update_app(app_id, app_def)
    try:
        shakedown.deployment_wait()
    except Exception:
        # The deployment cannot finish with a failing health check; undo it.
        client.rollback_deployment(deployment_id)
        shakedown.deployment_wait()

    tasks = client.get_tasks(app_id)
    assert len(tasks) == 1, \
        "The number of tasks is {} after rollback, but 1 was expected".format(len(tasks))
@shakedown.private_agents(2)
def test_marathon_with_master_process_failure(marathon_service_name):
    """Restarts the master and verifies that the service endpoint eventually
    comes back and the running task survives with the same task ID."""
    app_def = apps.sleep_app()
    common.pin_to_host(app_def, common.ip_other_than_mom())

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    tasks = client.get_tasks(app_def["id"])
    original_task_id = tasks[0]['id']

    common.systemctl_master('restart')
    shakedown.wait_for_service_endpoint(marathon_service_name)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_task_recovery():
        surviving = client.get_tasks(app_def["id"])
        assert len(surviving) == 1, \
            "The number of tasks is {} after master restart, but 1 was expected".format(len(surviving))
        assert surviving[0]['id'] == original_task_id, \
            "Task {} has not recovered, it got replaced with another one: {}".format(original_task_id, surviving[0]['id'])

    check_task_recovery()
@shakedown.private_agents(2)
def test_marathon_when_disconnected_from_zk():
    """Cuts Marathon off from ZooKeeper for a while and verifies that the
    running task is preserved."""
    app_def = apps.sleep_app()
    host = common.ip_other_than_mom()
    common.pin_to_host(app_def, host)

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    tasks = client.get_tasks(app_def["id"])
    original_task_id = tasks[0]['id']

    # Block the ZooKeeper port (2181) on the task's host for 10 seconds.
    common.block_iptable_rules_for_seconds(host, 2181, sleep_seconds=10, block_input=True, block_output=False)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_task_is_back():
        surviving = client.get_tasks(app_def["id"])
        assert surviving[0]['id'] == original_task_id, \
            "The task {} got replaced with {}".format(original_task_id, surviving[0]['id'])

    check_task_is_back()
@shakedown.private_agents(2)
def test_marathon_when_task_agent_bounced():
    """Restarts the agent a task runs on and verifies the task survives with
    the same task ID."""
    app_def = apps.sleep_app()
    host = common.ip_other_than_mom()
    common.pin_to_host(app_def, host)

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    tasks = client.get_tasks(app_def["id"])
    original_task_id = tasks[0]['id']

    shakedown.restart_agent(host)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_task_is_back():
        surviving = client.get_tasks(app_def["id"])
        assert surviving[0]['id'] == original_task_id, \
            "The task {} got replaced with {}".format(original_task_id, surviving[0]['id'])

    check_task_is_back()
def test_default_user():
    """Ensures a task is started as root when no user is configured."""
    app_def = apps.sleep_app()

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    app = client.get_app(app_def["id"])
    user = app.get('user')
    assert user is None, "User is {}, but it should not have been set".format(user)

    tasks = client.get_tasks(app_def["id"])
    host = tasks[0]['host']

    # awk exits non-zero if the sleep process owner is anything but root.
    success = shakedown.run_command_on_agent(host, "ps aux | grep '[s]leep ' | awk '{if ($1 !=\"root\") exit 1;}'")
    assert success, "The app is running as non-root"
@common.marathon_1_4
def test_declined_offer_due_to_resource_role():
    """Offers must be declined when an app requires a role no agent offers."""
    sleeper = apps.sleep_app()
    sleeper["acceptedResourceRoles"] = ["very_random_role"]
    _test_declined_offer(sleeper, 'UnfulfilledRole')
@common.marathon_1_4
def test_declined_offer_due_to_cpu_requirements():
    """Offers must be declined when no offer can satisfy the app's CPU demand."""
    sleeper = apps.sleep_app()
    sleeper["cpus"] = 12345
    _test_declined_offer(sleeper, 'InsufficientCpus')
def _test_declined_offer(app_def, reason):
    """Asserts that offers for *app_def* get declined for the given *reason*.

    `processedOffersSummary` and these tests in general require 1.4+ Marathon
    with the queue endpoint. The retry is the best possible way to "time" the
    success of the test.
    """
    app_id = app_def["id"]
    client = marathon.create_client()
    client.add_app(app_def)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def verify_declined_offer():
        deployments = client.get_deployments(app_id)
        assert len(deployments) == 1

        offer_summary = client.get_queued_app(app_id)['processedOffersSummary']
        by_last_offers = declined_offer_by_reason(offer_summary['rejectSummaryLastOffers'], reason)
        by_launch_attempt = declined_offer_by_reason(offer_summary['rejectSummaryLaunchAttempt'], reason)

        for summary in (by_last_offers, by_launch_attempt):
            assert summary['declined'] > 0, "There are no declined offers because of {}".format(reason)
            assert summary['processed'] > 0, "There are no processed offers for {}".format(reason)

    verify_declined_offer()
def declined_offer_by_reason(offers, reason):
    """Return the first offer entry whose 'reason' matches *reason*, removing
    the 'reason' key from it in place; return None when nothing matches."""
    match = next((offer for offer in offers if offer['reason'] == reason), None)
    if match is not None:
        del match['reason']
    return match
@pytest.mark.skipif("common.docker_env_not_set()")
def test_private_repository_docker_app():
    """Deploys an app from a private Docker repository after distributing the
    Docker Hub credentials to every private agent."""
    username = os.environ['DOCKER_HUB_USERNAME']
    password = os.environ['DOCKER_HUB_PASSWORD']

    common.create_docker_credentials_file(username, password)
    common.copy_docker_credentials_file(shakedown.get_private_agents())

    client = marathon.create_client()
    app_def = apps.private_docker_app()
    client.add_app(app_def)
    shakedown.deployment_wait()

    common.assert_app_tasks_running(client, app_def)
def test_ping(marathon_service_name):
    """Hits Marathon's /ping endpoint and expects an HTTP 200 'pong' reply."""
    response = common.http_get_marathon_path('ping', marathon_service_name)
    status = response.status_code
    assert status == 200, "HTTP status code {} is NOT 200".format(status)
    assert 'pong' in response.text, "Got {} instead of pong".format(response.text)
def test_metric_endpoint(marathon_service_name):
    """Fetches /metrics from the Marathon service and checks that the
    app-count gauge is present."""
    service_url = shakedown.dcos_service_url(marathon_service_name)
    response = http.get("{}metrics".format(service_url))
    assert response.status_code == 200, \
        "HTTP status code {} is NOT 200".format(response.status_code)

    gauges = response.json()['gauges']
    print(gauges)
    assert gauges['service.mesosphere.marathon.app.count'] is not None, \
        "service.mesosphere.marathon.app.count is absent"
# NOTE(review): "healtchcheck" is a typo, but renaming would change the pytest id.
def test_healtchcheck_and_volume():
    """Runs a Docker container with a health check and two volumes on Marathon."""
    app_def = apps.healthcheck_and_volume()
    app_id = app_def["id"]

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_id)

    tasks = client.get_tasks(app_id)
    app = client.get_app(app_id)
    assert len(tasks) == 1, \
        "The number of tasks is {} after deployment, but only 1 was expected".format(len(tasks))
    assert len(app['container']['volumes']) == 2, \
        "The container does not have the correct amount of volumes"

    # Wait for the app to turn healthy.
    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_health():
        app = client.get_app(app_id)
        assert app['tasksHealthy'] == 1, "The app is not healthy"

    check_health()
@shakedown.dcos_1_9
def test_vip_mesos_cmd(marathon_service_name):
    """Creates an app with a VIP label and checks that the service is
    reachable through the VIP address."""
    app_def = apps.http_server()
    vip_name = app_def["id"].lstrip("/")
    vip_address = '{}.{}.l4lb.thisdcos.directory'.format(vip_name, marathon_service_name)
    app_def['portDefinitions'] = [{
        "port": 0,
        "protocol": "tcp",
        "name": "{}".format(vip_name),
        "labels": {
            "VIP_0": "/{}:10000".format(vip_name)
        }
    }]

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_endpoint():
        time.sleep(1)
        common.assert_http_code('{}:{}'.format(vip_address, 10000))

    check_endpoint()
@shakedown.dcos_1_9
def test_vip_docker_bridge_mode(marathon_service_name):
    """Creates a Docker app (python web server) in bridge mode with a VIP
    label and checks that the service is reachable through the VIP address."""
    app_def = apps.docker_http_server()
    vip_name = app_def["id"].lstrip("/")
    vip_address = '{}.{}.l4lb.thisdcos.directory'.format(vip_name, marathon_service_name)

    app_def['id'] = vip_name
    app_def['container']['docker']['portMappings'] = [{
        "containerPort": 8080,
        "hostPort": 0,
        "labels": {
            "VIP_0": "/{}:10000".format(vip_name)
        },
        "protocol": "tcp",
        "name": "{}".format(vip_name)
    }]

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_endpoint():
        time.sleep(1)
        common.assert_http_code('{}:{}'.format(vip_address, 10000))

    check_endpoint()
def requires_marathon_version(version):
    """Skip the current test unless the running Marathon is at least *version*.

    This python module is for testing root and MoM marathons. The @marathon_1_5
    annotation works only for the root marathon. The context switching necessary
    for switching the marathons occurs after the evaluation of the pytestmark.
    This function is used to ensure the correct version of marathon regardless
    of root or mom.
    """
    # marathon version captured here will work for root and mom
    # NOTE(review): "marthon_version_less_than" appears to be shakedown's
    # actual (misspelled) API name — do not "fix" the spelling here.
    if shakedown.marthon_version_less_than(version):
        pytest.skip()
@pytest.mark.parametrize("test_type, get_pinger_app, dns_format", [
    ('localhost', apps.pinger_localhost_app, '{}.{}.mesos'),
    ('bridge', apps.pinger_bridge_app, '{}.{}.mesos'),
    ('container', apps.pinger_container_app, '{}.{}.containerip.dcos.thisdcos.directory'),
])
@shakedown.dcos_1_9
@shakedown.private_agents(2)
def test_network_pinger(test_type, get_pinger_app, dns_format, marathon_service_name):
    """Deploys a pinger app and a relay app and verifies task-to-task
    connectivity on the given network: curl hits the relay, the relay pings
    the pinger, and the pong travels all the way back — exercising both
    inbound and outbound connectivity.

    The python app is fetched from the master via the temporary http service
    (which will be moving into shakedown). test_type is unused in the body;
    it only labels which parametrized case is running or failing.
    """
    pinger_app = get_pinger_app()

    relay_app = get_pinger_app()
    relay_app["id"] = relay_app["id"].replace("pinger", "relay")

    pinger_dns = dns_format.format(pinger_app["id"].lstrip("/"), marathon_service_name)
    relay_dns = dns_format.format(relay_app["id"].lstrip("/"), marathon_service_name)

    # Stage the pinger script on the master so the apps can fetch it.
    shakedown.copy_file_to_master(os.path.join(scripts.scripts_dir(), "pinger.py"))

    client = marathon.create_client()
    with shakedown.master_http_service():
        # The apps must be added while the http service is up or the fetch fails.
        client.add_app(pinger_app)
        client.add_app(relay_app)
        shakedown.deployment_wait()
        shakedown.wait_for_dns(relay_dns)

    relay_url = 'http://{}:7777/relay-ping?url={}:7777'.format(relay_dns, pinger_dns)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=60, retry_on_exception=common.ignore_exception)
    def check_relay_roundtrip():
        status, output = shakedown.run_command_on_master('curl {}'.format(relay_url))
        assert status
        assert 'Pong {}'.format(pinger_app["id"]) in output
        assert 'Relay from {}'.format(relay_app["id"]) in output

    check_relay_roundtrip()
@shakedown.dcos_1_11
def test_ipv6_healthcheck(docker_ipv6_network_fixture):
    """DC/OS 1.11 can health-check containers running on an IPv6 network;
    verifies that such a health check executes and passes.
    """
    app_def = apps.ipv6_healthcheck()
    target_instances_count = app_def['instances']

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(timeout=timedelta(minutes=1).total_seconds(), app_id=app_def['id'])

    app = client.get_app(app_def["id"])
    running = app['tasksRunning']
    healthy = app['tasksHealthy']
    assert running == target_instances_count, \
        "The number of running tasks is {}, but {} was expected".format(running, target_instances_count)
    assert healthy == target_instances_count, \
        "The number of healthy tasks is {}, but {} was expected".format(healthy, target_instances_count)

    client.remove_app(app['id'], True)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
import tensorflow as tf
from tensorflow.python.platform import benchmark
FLAGS = tf.app.flags.FLAGS
def normalize(x):
  """Scales each row of x to unit Euclidean norm."""
  row_norms = np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
  return x / row_norms
def cosine_similarity(x, y):
  """Matrix of cosine similarities between the rows of x and the rows of y."""
  def _unit(v):
    # Inlined row-normalization (unit Euclidean norm per row).
    return v / np.sqrt(np.sum(v * v, axis=-1, keepdims=True))
  return np.dot(_unit(x), np.transpose(_unit(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
  """Draws num_centers random integer-valued points in [0, center_norm)^num_dims."""
  raw = np.random.rand(num_centers, num_dims).astype(np.float32)
  return np.round(raw * center_norm)
def make_random_points(centers, num_points, max_offset=20):
  """Samples num_points points scattered around the given centers.

  Returns a tuple (points, assignments, squared_offset_norms) where
  assignments[i] is the index of the center points[i] was offset from.
  """
  num_centers, num_dims = centers.shape
  assignments = np.random.choice(num_centers, num_points)
  offsets = np.round(
      np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
  points = centers[assignments] + offsets
  return points, assignments, np.add.reduce(offsets * offsets, 1)
class KMeansTestBase(tf.test.TestCase):
  """Shared fixtures for the KMeans estimator tests."""

  def input_fn(self, batch_size=None, points=None):
    """Builds an input_fn that yields (possibly sub-sampled) batches of points."""
    batch_size = batch_size or self.batch_size
    points = self.points if points is None else points
    num_points = points.shape[0]

    def _fn():
      x = tf.constant(points)
      if batch_size == num_points:
        # Full batch: feed all points as-is.
        return x, None
      indices = tf.random_uniform(tf.constant([batch_size]),
                                  minval=0, maxval=num_points - 1,
                                  dtype=tf.int32,
                                  seed=10)
      return tf.gather(x, indices), None

    return _fn

  @staticmethod
  def config(tf_random_seed):
    """RunConfig with a fixed TF-level random seed."""
    return tf.contrib.learn.RunConfig(tf_random_seed=tf_random_seed)

  @property
  def batch_size(self):
    # Full-batch by default; mini-batch subclasses override use_mini_batch.
    return self.num_points

  @property
  def use_mini_batch(self):
    return False
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 10000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
self.kmeans = tf.contrib.learn.KMeansClustering(
self.num_centers,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
use_mini_batch=self.use_mini_batch,
config=self.config(14),
random_seed=10)
def test_clusters(self):
kmeans = self.kmeans
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape),
[self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self.kmeans
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
return
kmeans = tf.contrib.learn.KMeansClustering(
self.num_centers,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
use_mini_batch=self.use_mini_batch,
config=tf.contrib.learn.RunConfig(tf_random_seed=14),
random_seed=12)
kmeans.fit(input_fn=self.input_fn(),
# Force it to train forever until the monitor stops it.
steps=None,
relative_tolerance=1e-4)
score = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.005)
def test_infer(self):
kmeans = self.kmeans
kmeans.fit(input_fn=self.input_fn(), relative_tolerance=1e-4)
clusters = kmeans.clusters()
# Make a small test set
num_points = 10
points, true_assignments, true_offsets = make_random_points(clusters,
num_points)
# Test predict
assignments = kmeans.predict(input_fn=self.input_fn(batch_size=num_points,
points=points))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(input_fn=lambda: (tf.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(input_fn=lambda: (tf.constant(points), None))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
  def test_fit_raise_if_num_clusters_larger_than_num_points_random_init(self):
    """Random init must fail when there are fewer points than clusters."""
    points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
    with self.assertRaisesOpError('less'):
      kmeans = tf.contrib.learn.KMeansClustering(
          num_clusters=3, initial_clusters=tf.contrib.factorization.RANDOM_INIT)
      kmeans.fit(input_fn=lambda: (tf.constant(points), None), steps=10)
  def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(
      self):
    """KMeans++ init must fail when there are fewer points than clusters."""
    points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
    # NOTE(review): assertRaisesOpError usually takes a regex/predicate,
    # not an exception class — confirm this matches as intended.
    with self.assertRaisesOpError(AssertionError):
      kmeans = tf.contrib.learn.KMeansClustering(
          num_clusters=3,
          initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT)
      kmeans.fit(input_fn=lambda: (tf.constant(points), None), steps=10)
class KMeansTestCosineDistance(KMeansTestBase):
  """KMeans tests using cosine distance on hand-picked 2-D points."""

  def setUp(self):
    """Build two angular clusters plus their analytic centers and score."""
    self.points = np.array(
        [[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2],
         [0.1, 2.5], [0.2, 2], [0.1, 3], [0.2, 4]], dtype=np.float32)
    self.num_points = self.points.shape[0]
    # True centers: normalized mean of each half of the normalized points.
    self.true_centers = np.array(
        [normalize(np.mean(normalize(self.points)[0:4, :],
                           axis=0,
                           keepdims=True))[0],
         normalize(np.mean(normalize(self.points)[4:, :],
                           axis=0,
                           keepdims=True))[0]], dtype=np.float32)
    self.true_assignments = [0] * 4 + [1] * 4
    # Cosine score: n minus total similarity of points to their centers.
    self.true_score = len(self.points) - np.tensordot(
        normalize(self.points), self.true_centers[self.true_assignments])
    self.num_centers = 2
    self.kmeans = tf.contrib.learn.KMeansClustering(
        self.num_centers,
        initial_clusters=tf.contrib.factorization.RANDOM_INIT,
        distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
        use_mini_batch=self.use_mini_batch,
        config=self.config(3))

  def test_fit(self):
    """Fitted centers should match the analytic centers."""
    self.kmeans.fit(input_fn=self.input_fn(), steps=10)
    centers = normalize(self.kmeans.clusters())
    self.assertAllClose(np.sort(centers, axis=0),
                        np.sort(self.true_centers, axis=0))

  def test_transform(self):
    """Transform should equal 1 - cosine similarity to each center."""
    self.kmeans.fit(input_fn=self.input_fn(), steps=10)
    centers = normalize(self.kmeans.clusters())
    true_transform = 1 - cosine_similarity(self.points, centers)
    transform = self.kmeans.transform(input_fn=self.input_fn())
    self.assertAllClose(transform, true_transform, atol=1e-3)

  def test_predict(self):
    """Assignments, centers and score should match the analytic values."""
    self.kmeans.fit(input_fn=self.input_fn(), steps=30)
    centers = normalize(self.kmeans.clusters())
    self.assertAllClose(np.sort(centers, axis=0),
                        np.sort(self.true_centers, axis=0), atol=1e-2)
    assignments = self.kmeans.predict(input_fn=self.input_fn())
    self.assertAllClose(centers[assignments],
                        self.true_centers[self.true_assignments], atol=1e-2)
    score = self.kmeans.score(input_fn=self.input_fn(), steps=1)
    self.assertAllClose(score, self.true_score, atol=1e-2)

  def test_predict_kmeans_plus_plus(self):
    """KMeans++ should recover sparsely populated angular clusters."""
    # Most points are concentrated near one center. KMeans++ is likely to find
    # the less populated centers.
    points = np.array([[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3],
                       [-3.1, -3.2], [-2.8, -3.], [-2.9, -3.1], [-3., -3.1],
                       [-3., -3.1], [-3.2, -3.], [-3., -3.]], dtype=np.float32)
    true_centers = np.array(
        [normalize(np.mean(normalize(points)[0:2, :], axis=0,
                           keepdims=True))[0],
         normalize(np.mean(normalize(points)[2:4, :], axis=0,
                           keepdims=True))[0],
         normalize(np.mean(normalize(points)[4:, :], axis=0,
                           keepdims=True))[0]], dtype=np.float32)
    true_assignments = [0] * 2 + [1] * 2 + [2] * 8
    true_score = len(points) - np.tensordot(normalize(points),
                                            true_centers[true_assignments])
    kmeans = tf.contrib.learn.KMeansClustering(
        3,
        initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
        distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
        use_mini_batch=self.use_mini_batch,
        config=self.config(3))
    kmeans.fit(input_fn=lambda: (tf.constant(points), None), steps=30)
    centers = normalize(kmeans.clusters())
    self.assertAllClose(sorted(centers.tolist()),
                        sorted(true_centers.tolist()),
                        atol=1e-2)
    assignments = kmeans.predict(input_fn=lambda: (tf.constant(points), None))
    self.assertAllClose(centers[assignments],
                        true_centers[true_assignments], atol=1e-2)
    score = kmeans.score(input_fn=lambda: (tf.constant(points), None), steps=1)
    self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansTest(KMeansTest):
  """Re-runs the full KMeansTest suite with mini-batch training enabled."""

  @property
  def use_mini_batch(self):
    """This variant always trains with mini-batches."""
    return True

  @property
  def batch_size(self):
    """Number of points fed to each training step."""
    return 450
class KMeansBenchmark(benchmark.Benchmark):
  """Base class for benchmarks."""

  def SetUp(self, dimension=50, num_clusters=50, points_per_cluster=10000,
            center_norm=500, cluster_width=20):
    """Generate a synthetic clustered data set.

    Creates `num_clusters` random centers of the given `dimension` and
    `points_per_cluster` points around each; `self.score` keeps the sum
    of the generated per-point offsets for later comparison.
    """
    np.random.seed(123456)
    self.num_clusters = num_clusters
    self.num_points = num_clusters * points_per_cluster
    self.centers = make_random_centers(self.num_clusters, dimension,
                                       center_norm=center_norm)
    self.points, _, scores = make_random_points(self.centers, self.num_points,
                                                max_offset=cluster_width)
    self.score = float(np.sum(scores))

  def _report(self, num_iters, start, end, scores):
    """Report mean wall time per iteration plus all fit scores."""
    print(scores)
    self.report_benchmark(iters=num_iters, wall_time=(end - start) / num_iters,
                          extras={'true_sum_squared_distances': self.score,
                                  'fit_scores': scores})

  def _fit(self, num_iters=10):
    """Subclasses implement the actual clustering runs."""
    pass

  def benchmark_01_2dim_5center_500point(self):
    self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
    self._fit()

  def benchmark_02_20dim_20center_10kpoint(self):
    self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
    self._fit()

  def benchmark_03_100dim_50center_50kpoint(self):
    self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
    self._fit()

  def benchmark_03_100dim_50center_50kpoint_unseparated(self):
    # Wider clusters overlap, making the problem harder.
    self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000,
               cluster_width=250)
    self._fit()

  def benchmark_04_100dim_500center_500kpoint(self):
    self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
    self._fit(num_iters=4)

  def benchmark_05_100dim_500center_500kpoint_unseparated(self):
    self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000,
               cluster_width=250)
    self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
  """Benchmarks tf.contrib.learn's KMeansClustering on the data sets."""

  def _fit(self, num_iters=10):
    """Fit TF KMeans `num_iters` times, recording scores and wall time."""
    scores = []
    start = time.time()
    for i in range(num_iters):
      print('Starting tensorflow KMeans: %d' % i)
      tf_kmeans = tf.contrib.learn.KMeansClustering(
          self.num_clusters,
          initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
          # More retries for larger cluster counts.
          kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
          random_seed=i * 42,
          config=tf.contrib.learn.RunConfig(tf_random_seed=3))
      tf_kmeans.fit(input_fn=lambda: (tf.constant(self.points), None),
                    steps=50,
                    relative_tolerance=1e-6)
      # Force cluster materialization before scoring.
      _ = tf_kmeans.clusters()
      scores.append(tf_kmeans.score(
          input_fn=lambda: (tf.constant(self.points), None), steps=1))
    self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
  """Benchmarks scikit-learn's KMeans on the same generated data sets."""

  def _fit(self, num_iters=10):
    """Fit sklearn KMeans `num_iters` times and report time and inertia."""
    scores = []
    started = time.time()
    for trial in range(num_iters):
      print('Starting sklearn KMeans: %d' % trial)
      model = SklearnKMeans(n_clusters=self.num_clusters,
                            init='k-means++',
                            max_iter=50, n_init=1, tol=1e-4,
                            random_state=trial * 42)
      model.fit(self.points)
      scores.append(model.inertia_)
    self._report(num_iters, started, time.time(), scores)
if __name__ == '__main__':
  # Run all test cases and benchmarks registered in this module.
  tf.test.main()
| |
import os
import sys
__author__ = 'bakl'
# Repository root: three directory levels above this file.
ROOT_DIRECTORY = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Default location of bundled plugins.
plugin_path = os.path.join(ROOT_DIRECTORY, 'plugin')
def lc_wrapper(param, p=None, method='plot'):
    """Build a CallBack from a 'name:arg1:arg2' spec, resolving its path.

    :param param: colon-separated plugin name plus arguments
    :param p: plugin directory; when None, resolved automatically
    :param method: function name to load from the plugin module
    :return: a loaded CallBack instance
    """
    pieces = param.split(':')
    fname = pieces.pop(0)
    if p is None:
        # Resolution order: explicit path, current directory, bundled plugins.
        if os.path.isfile(fname + '.py'):
            p, fname = os.path.split(fname)
        elif os.path.isfile(os.path.join(os.getcwd(), fname + '.py')):
            p = os.getcwd()
        else:
            p = plugin_path
    c = CallBack(fname, path=p, args=pieces, method=method, load=1)
    print("Call: %s from %s" % (c.Func, c.FuncFileFull))
    return c
def observations(args):
    """
    Get observations from argument line
    :param args: argument line parsed with argparse.ArgumentParser
    :return: data from callbacks
    """
    if not args.call or len(args.call) == 0:
        return None
    if len(args.call) == 1:
        callback = lc_wrapper(args.call[0], method='load')
    else:  # len(args.call) > 1
        # Several specs: wrap every loaded callback in a CallBackArray.
        a = []
        for line in args.call:
            c = lc_wrapper(line, method='load')
            a.append(c)
        callback = CallBackArray(a)
    if callback is None:
        return None
    # Get observations
    obs = callback.run({'is_debug': args.is_not_quiet})
    return obs
class CallBack(object):
    """Wrapper around a plugin function loaded from a python module file.

    The target function is resolved either from names already defined in
    this module or by importing ``<Path>/<FileName>.py`` and fetching one
    of its ``run``/``plot``/``load`` (or an explicitly named) functions.
    String arguments collected in ``self._args`` are forwarded through
    the ``args`` key of the dict passed to the plugin.
    """

    def __init__(self, fname, path='./', args=None, method=None, load=1):
        """Remember the plugin location and optionally resolve it now.

        :param fname: plugin module name without the ``.py`` suffix
        :param path: directory that contains the plugin file
        :param args: list of string arguments for the plugin (may be None)
        :param method: explicit function name to load; when None, falls
            back to ``run``/``plot`` discovery via :attr:`find_func`
        :param load: when 1 (and fname is set), resolve immediately
        """
        self._func = None
        self._path = path
        self._fname = fname
        self._args = args
        if fname is not None and load == 1:
            if method is None:
                self._func = self.find_func
            else:
                self._func = self.find_method(method)

    @property
    def Path(self):
        """Directory containing the plugin file."""
        return self._path

    @property
    def FileName(self):
        """Plugin module name (no extension)."""
        return self._fname

    @property
    def FuncFile(self):
        """Plugin file name with the ``.py`` extension."""
        return self.FileName + '.py'

    @property
    def FuncFileFull(self):
        """Full path to the plugin file."""
        return os.path.join(self.Path, self.FuncFile)

    @property
    def Func(self):
        """The resolved plugin callable (None until loaded)."""
        return self._func

    def get_arg(self, idx):
        """Return argument ``idx``, or None when absent."""
        if self._args is None:
            return None
        if len(self._args) > idx:
            return self._args[idx]
        return None

    def set_arg(self, idx, val):
        """Replace argument ``idx``, or append when ``idx == len(args)``.

        Fixed: the original raised for every existing index (an early
        ``len > idx`` check shadowed the replace branch), crashed with
        ``TypeError: len(None)`` for ``idx == 0`` on a fresh instance,
        and with ``IndexError`` when assigning into the new empty list.

        :raises Exception: for a negative index or one past the end.
        :return: self, for chaining.
        """
        if idx < 0:
            raise Exception("Index should be more 0 [idx = %d]" % idx)
        if self._args is None and idx > 0:
            raise Exception("Index should be 0, if self._args is None [idx = %d]" % idx)
        if self._args is None:
            # First argument: start a fresh list (idx is 0 here).
            self._args = []
        if idx > len(self._args):
            raise Exception("Index should be less then len(self._args)=%d [idx = %d]" % (len(self._args), idx))
        if idx == len(self._args):
            self._args.append(val)
        else:
            self._args[idx] = val
        return self

    def add_arg(self, val):
        """Append ``val`` after the existing arguments."""
        if self._args is None:
            idx = 0
        else:
            idx = len(self._args)
        self.set_arg(idx, val)

    def put_args(self, args):
        """Replace the whole argument list."""
        self._args = args

    def arg_totext(self, idx):
        """Return argument ``idx`` as text; '' when absent."""
        res = self.get_arg(idx)
        if res is None:
            return ''
        return res

    @property
    def find_func(self):
        """Resolve the plugin's ``run`` (preferred) or ``plot`` function.

        Looks first among names already defined in this module, then
        imports the plugin file.

        :raises Exception: when no usable function is found.
        """
        possibles = globals().copy()
        possibles.update(locals())
        method = possibles.get(self.FileName)
        if not method and os.path.isfile(self.FuncFileFull):  # find in files
            sys.path.append(self.Path)
            py_mod = __import__(self.FileName, fromlist=['run', 'plot', 'load'])
            if hasattr(py_mod, 'run'):
                method = getattr(__import__(self.FileName), 'run')
            elif hasattr(py_mod, 'plot'):
                method = getattr(__import__(self.FileName), 'plot')
        if not method:
            raise Exception("Method %s not implemented" % self._fname)
        return method

    def find_method(self, method_name):
        """Resolve function ``method_name`` from the plugin file.

        :raises Exception: when the file or function is missing.
        """
        method = None
        if os.path.isfile(self.FuncFileFull):  # find in files
            sys.path.append(self.Path)
            py_mod = __import__(self.FileName, fromlist=[method_name])
            if hasattr(py_mod, method_name):
                method = getattr(__import__(self.FileName), method_name)
        if not method:
            raise Exception("Method %s not implemented" % self._fname)
        return method

    @property
    def find_load(self):
        """Resolve the plugin's ``load`` function.

        :raises Exception: when no ``load`` function is found.
        """
        possibles = globals().copy()
        possibles.update(locals())
        method = possibles.get(self.FileName)
        if not method and os.path.isfile(self.FuncFileFull):  # find in files
            sys.path.append(self.Path)
            py_mod = __import__(self.FileName, fromlist=['load'])
            if hasattr(py_mod, 'load'):
                method = getattr(__import__(self.FileName), 'load')
        if not method:
            raise Exception("Method load in %s not implemented" % self._fname)
        return method

    def plot(self, ax, dic=None):
        """Call the plugin with an axes object and an options dict."""
        if dic is None:
            dic = {}
        if self._args is not None:
            # Pass a copy so the plugin cannot mutate our argument list.
            dic['args'] = self._args[:]
        return self._func(ax, dic)

    def run(self, dic=None):
        """Call the plugin with an options dict and return its result."""
        if dic is None:
            dic = {}
        if self._args is not None:
            dic['args'] = self._args[:]
        return self._func(dic)

    def load(self, dic=None):
        """Call the plugin's loader with an options dict."""
        if dic is None:
            dic = {}
        if self._args is not None:
            dic['args'] = self._args[:]
        return self._func(dic)
class CallBackArray(CallBack):
    """A CallBack that fans each operation out over a list of callbacks."""

    def __init__(self, calls, fname=None):
        """Store the wrapped callbacks; no function of its own is loaded."""
        super(CallBackArray, self).__init__(fname)
        self._calls = calls

    def get_arg(self, idx):
        return super(CallBackArray, self).get_arg(idx)

    def set_arg(self, idx, val):
        super(CallBackArray, self).set_arg(idx, val)
        return self

    def add_arg(self, val):
        super(CallBackArray, self).add_arg(val)

    def put_args(self, args):
        super(CallBackArray, self).put_args(args)

    def plot(self, ax, dic=None):
        """Plot every callback; ``ax`` may be one axes or a sequence.

        Fixed: ``collections.Sequence`` was removed in Python 3.10; use
        ``collections.abc`` with a Python 2 fallback.
        """
        try:
            from collections.abc import Sequence
        except ImportError:  # Python 2 fallback
            from collections import Sequence
        if dic is None:
            dic = {}
        res = []
        if self._args is not None:
            dic['args'] = self._args[:]
        if isinstance(ax, Sequence):
            # One axes per callback.
            for i, c in enumerate(self._calls):
                r = c.plot(ax[i], dic)
                res.append(r)
        else:
            # All callbacks share the same axes.
            for c in self._calls:
                r = c.plot(ax, dic)
                res.append(r)
        return res

    def run(self, dic=None):
        """Run every callback and return the list of results."""
        if dic is None:
            dic = {}
        if self._args is not None:
            dic['args'] = self._args[:]
        res = []
        for c in self._calls:
            res.append(c.run(dic))
        return res

    def load(self, dic=None):
        """Load every callback and return the list of results.

        Fixed: the original called ``self._func(dic)``, but ``_func`` is
        always None for an array (fname is None), so load() raised
        TypeError; it now delegates to the wrapped callbacks like run().
        """
        if dic is None:
            dic = {}
        if self._args is not None:
            dic['args'] = self._args[:]
        res = []
        for c in self._calls:
            res.append(c.load(dic))
        return res
| |
from fabric.api import *
# Deployment settings shared by all tasks; %(name)s-interpolated into the
# shell commands below.
vars = {
    'app_dir': '/usr/local/apps/land_owner_tools/lot',
    'venv': '/usr/local/venv/lot',
    'sitename': 'localhost:8080',
    'branch': 'master'
}
env.forward_agent = True
env.key_filename = '~/.vagrant.d/insecure_private_key'
# Optional local overrides (branches, AWS hosts/keys) created from
# fab_vars.py.template; tasks check fab_vars_exists before using them.
try:
    from fab_vars import *
    fab_vars_exists = True
except ImportError:
    fab_vars_exists = False
def dev():
    """ Use development server settings """
    # Vagrant's default forwarded SSH port on localhost.
    hosts = ['vagrant@127.0.0.1:2222']
    env.hosts = hosts
    return hosts
def prod_dev():
    """ Use the DEV_* overrides from fab_vars on a development server """
    try:
        if fab_vars_exists:
            if DEV_BRANCH:
                vars['branch'] = DEV_BRANCH
            if DEV_SITENAME:
                vars['sitename'] = DEV_SITENAME
            if DEV_SERVER:
                servers = DEV_SERVER
            else:
                # Fall back to the local vagrant box.
                servers = ['vagrant@127.0.0.1:2222']
            env.hosts = servers
            return servers
        else:
            raise Exception("\nERROR: Cannot import file fab_vars.py. Have you created one from the template fab_vars.py.template?\n")
    except Exception as inst:
        # NOTE(review): swallows the error and returns None; callers that
        # concatenate host lists will then fail.
        print inst
def stage():
    """ Use staging server settings from fab_vars """
    try:
        if fab_vars_exists:
            vars['branch'] = STAGE_BRANCH
            if AWS_KEY_FILENAME_STAGE:
                env.key_filename = AWS_KEY_FILENAME_STAGE
            else:
                env.key_filename = None
            # NOTE(review): branch already assigned above; this re-check
            # is redundant.
            if STAGE_BRANCH:
                vars['branch'] = STAGE_BRANCH
            servers = AWS_PUBLIC_DNS_STAGE
            env.hosts = servers
            vars['sitename'] = AWS_SITENAME_STAGE
            return servers
        else:
            raise Exception("\nERROR: Cannot import file fab_vars.py. Have you created one from the template fab_vars.py.template?\n")
    except Exception as inst:
        print inst
def prod():
    """ Use production server settings """
    try:
        if fab_vars_exists:
            vars['branch'] = PROD_BRANCH
            env.key_filename = AWS_KEY_FILENAME_PROD
            servers = AWS_PUBLIC_DNS_PROD
            env.hosts = servers
            vars['sitename'] = AWS_SITENAME_PROD
            return servers
        else:
            raise Exception("\nERROR: Cannot import file fab_vars.py. Have you created one from the template fab_vars.py.template?\n")
    except Exception as inst:
        # NOTE(review): swallows the error and returns None.
        print inst
def test():
    """ Use test server settings """
    hosts = ['ninkasi']
    env.hosts = hosts
    return hosts
def all():
    """ Use all servers """
    # NOTE(review): shadows the builtin all(); also prod() returns None
    # when fab_vars is missing, which would break this concatenation.
    env.hosts = dev() + prod() + test()
def _install_requirements():
    """ Install python dependencies into the project virtualenv """
    run('cd %(app_dir)s && %(venv)s/bin/pip install distribute' % vars)
    run('cd %(app_dir)s && %(venv)s/bin/pip install -r ../requirements.txt' % vars)
def _install_django():
    """ Sync/migrate the DB and configure django media, sharing and site """
    run('cd %(app_dir)s && %(venv)s/bin/python manage.py syncdb --noinput && \
    %(venv)s/bin/python manage.py migrate --noinput && \
    %(venv)s/bin/python manage.py install_media -a && \
    %(venv)s/bin/python manage.py enable_sharing --all && \
    %(venv)s/bin/python manage.py site %(sitename)s && \
    %(venv)s/bin/python manage.py install_cleangeometry' % vars)
def _recache():
    """ Clear the application cache (precache step currently disabled) """
    run('cd %(app_dir)s && %(venv)s/bin/python manage.py clear_cache' % vars)
    # run('cd %(app_dir)s && %(venv)s/bin/python manage.py clear_cache && \
    #            %(venv)s/bin/python manage.py precache' % vars)
def manage(command):
    """ Runs any manage.py command on the server """
    # Temporarily expose the command to the %-interpolation dict.
    vars['command'] = command
    run('cd %(app_dir)s && %(venv)s/bin/python manage.py %(command)s' % vars)
    del vars['command']
def create_superuser():
    """ Create the django superuser (interactive!) """
    run('cd %(app_dir)s && %(venv)s/bin/python manage.py createsuperuser' % vars)
def import_data():
    """ Fetches and installs data fixtures (WARNING: 5+GB of data; hence not checking fixtures into the repo) """
    run('cd %(app_dir)s && %(venv)s/bin/python manage.py import_data' % vars)
def init():
    """ Initialize the forest planner application """
    _install_requirements()
    _install_django()
    _install_starspan()
    _recache()
    #restart_services()
def restart_services():
    """ Restart uwsgi, nginx and supervisor-managed processes """
    run('sudo service uwsgi restart')
    run('sudo service nginx restart')
    # Reload if supervisor is running, otherwise start it fresh.
    run('sudo supervisorctl reload || sudo service supervisor start')
    run('sleep 2 && sudo supervisorctl status')
def status():
    """ Show service, supervisor, top-process and maintenance-mode status """
    init_services = ['postgresql', 'redis-server', 'supervisor']
    for service in init_services:
        run('sudo service %s status' % service)
    run('sudo supervisorctl status')
    run('sudo ps -eo pid,%cpu,%mem,comm,args --sort=-%cpu,-%mem | head -n 10')
    ON = """\n !!!! maintenance_mode is on !!!!
    Test and run \n fab <server> maintenance:off
    when it's good to go
    """
    OFF = """\n !!!! maintenance_mode is OFF; site is live !!!!"""
    run('test -f /tmp/.maintenance_mode && echo "%s" || echo "%s"' % (ON, OFF))
def install_media():
    """ Run the django install_media command """
    run('cd %(app_dir)s && %(venv)s/bin/python manage.py install_media' % vars)
def copy_media():
    """ Just copy the basic front end stuff. Speed! """
    # NOTE(review): the '% vars' interpolation is a no-op here (no
    # placeholders in the command string).
    run('rsync -rtvu /usr/local/apps/land_owner_tools/media/common/ /usr/local/apps/land_owner_tools/mediaroot/common' % vars)
def runserver():
    """ Run the django dev server on port 8000 """
    run('cd %(app_dir)s && %(venv)s/bin/python manage.py runserver 0.0.0.0:8000' % vars)
def update():
    """ Sync with correct branch in git repo """
    with settings(
        warn_only=True
    ):
        # If the branch already exists locally, check it out and merge;
        # otherwise create it tracking origin.
        if run('cd %(app_dir)s && \
        git fetch && \
        git show-ref --verify --quiet refs/heads/%(branch)s' % vars):
            run('cd %(app_dir)s && \
            git checkout %(branch)s && \
            git merge origin/%(branch)s' % vars)
        else:
            run('cd %(app_dir)s && \
            git checkout -b %(branch)s origin/%(branch)s && \
            git merge origin/%(branch)s' % vars)
def branch():
    """ Create the configured branch locally and merge from origin """
    run('cd %(app_dir)s && \
    git fetch && \
    git checkout -b %(branch)s && \
    git merge origin/%(branch)s' % vars)
def _install_starspan():
    """ Clone and build starspan unless it is already installed """
    run('mkdir -p ~/src && cd ~/src && \
    if [ ! -d "starspan" ]; then git clone https://github.com/Ecotrust/starspan.git; fi && \
    cd starspan && \
    if [ ! `which starspan` ]; then ./configure && make && sudo make install; fi')
def deploy():
    """
    Deploy to a staging/production environment
    """
    # Refuse to deploy to the local vagrant box.
    for s in env.hosts:
        if 'vagrant' in s:
            raise Exception("You can't deploy() to local dev, just use `init restart_services`")
    maintenance("on")
    update()
    init()
    restart_services()
    maintenance("off")
def maintenance(status):
    """
    turn maintenance mode on or off
    fab dev maintenance:on
    fab dev maintenance:off
    """
    if status == "on":
        run("touch /tmp/.maintenance_mode")
    else:
        # NOTE(review): rm fails if the flag file doesn't exist — consider rm -f.
        run("rm /tmp/.maintenance_mode")
def provision():
    """
    Run puppet on a staging/production environment
    """
    stage = False
    for s in env.hosts:
        if 'vagrant' in s:
            raise Exception("You can't provision() on local dev, just vagrant up/provision")
        if 'stage' in s:
            stage = True
    maintenance("on")
    update()
    # see lot.pp for defaults
    if stage:
        num_cpus = AWS_VARS_STAGE.get("num_cpus", 1)
        postgres_shared_buffers = AWS_VARS_STAGE.get("postgres_shared_buffers", "48MB")
        shmmax = AWS_VARS_STAGE.get("shmmax", 67108864)
    else:  # assume prod
        num_cpus = AWS_VARS_PROD.get("num_cpus", 1)
        postgres_shared_buffers = AWS_VARS_PROD.get("postgres_shared_buffers", "48MB")
        shmmax = AWS_VARS_PROD.get("shmmax", 67108864)
    # Facts are exported as facter_* environment variables for the manifest.
    run("""sudo \
    facter_user=ubuntu \
    facter_group=ubuntu \
    facter_url_base=http://%s \
    facter_num_cpus=%s \
    facter_postgres_shared_buffers=%s \
    facter_shmmax=%s \
    facter_pgsql_base=/var/lib/postgresql/ puppet apply \
    --templatedir=/usr/local/apps/land_owner_tools/scripts/puppet/manifests/files \
    --modulepath=/usr/local/apps/land_owner_tools/scripts/puppet/modules \
    /usr/local/apps/land_owner_tools/scripts/puppet/manifests/lot.pp
    """ % (env.host,
           num_cpus,
           postgres_shared_buffers,
           shmmax
           )
    )
    restart_services()
    status()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# jan 2014 bbb garden shield attempt
# AKA
'''
Sensors:
analog level sensor, pin AIN0
TMP102 i2c temperature sensor, address 0x48
(if add0 is grounded) or 0x49 (if pulled up)
Outputs:
Analog RGB LED strip
I2C display(?)
Pump Activate/Deactivate (GPIO pin)
Some measurements as of mid-March 2014:
Tank can be pumped for 15 minutes without sun exposure to liquid.
Seems like after 10 minutes of pumping, the autosiphon engages, though.
Tank takes about 17 minutes to drain from a 15-minute pump
11 gals in reservoir reads as 0.42 on the adc.read scale from 0 to 1
8 gals in reservoir reads as 0.175 on the adc.read scale from 0 to 1
7 gals in reservoir reads as 0.15 on the adc.read scale from 0 to 1
'''
from __future__ import division
import Adafruit_SSD1306 as ssd
import Adafruit_BBIO.UART as uart
import Image
import ImageDraw
import ImageFont
# import Adafruit_GPIO.PWM as pwm
import Adafruit_BBIO.GPIO as gpio
import Adafruit_BBIO.ADC as adc
# import TMP102 as tmp102
import datetime
from dateutil.tz import tzlocal
import time
import serial
import atexit
from math import log
import requests
import key as k
import logging
# Thermistor (beta-equation Steinhart-Hart) constants.
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000  # resistance (ohms) at the nominal temperature
TEMPERATURENOMINAL = 25.0  # nominal temperature, deg C
SERIESRESISTOR = 10000  # divider resistor (ohms) for the tank thermistor
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
# BeagleBone pin assignments.
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'
photoPin = 'P9_38'
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
RST = 'P8_10' # OLED screen reset pin, not always necessary
# NOTE(review): RST and pumpPin are both 'P8_10' — looks unintentional; confirm.
readings = {}  # latest sensor readings, refreshed by do_sensor_read()
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
    """atexit hook: force the pump off and release GPIO/UART resources."""
    print 'exiting'
    gpio.output(pumpPin,gpio.LOW)
    gpio.cleanup()
    uart.cleanup()
def do_sensor_read():
    """Sample all sensors and publish them into the global `readings` dict.

    Keys written: bedTemp, tankTemp (deg C), tankLevel, photocell (0..1).
    ADC reads are duplicated because of a known BBIO driver bug (values
    lag by one read).
    """
    print 'sensor read'
    global readings
    readings = {}
    # value = ADC.read("AIN1")
    # adc returns value from 0 to 1.
    # use read_raw(pin) to get V values
    # tank = adc.read(tankPin)
    tank = adc.read(tankPin) # have to read twice due to bbio bug
    print 'tank is %s' % tank
    time.sleep(1)
    # photo = adc.read(photoPin) # have to read twice due to bbio bug
    photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
    print 'photo is %s' % photo
    time.sleep(1)
    # temp1 = adc.read_raw(thermistor1)
    temp1 = adc.read_raw(thermistor1)
    time.sleep(1)
    print 'temp1 raw %s' % temp1
    temp1 = convert_thermistor_special(temp1)
    readings['bedTemp'] = temp1
    print 'converted bed_temp is %s' % temp1
    # # do conversion per
    # # http://learn.adafruit.com/thermistor/using-a-thermistor
    # temp2 = adc.read_raw(thermistor2)
    temp2 = adc.read_raw(thermistor2)
    time.sleep(1)
    print 'temp2 raw %s' % temp2
    print temp2
    temp2 = convert_thermistor(temp2)
    readings['tankTemp'] = temp2
    print 'converted reservoir_temp is %s' % temp2
    # do conversion per
    # http://learn.adafruit.com/thermistor/using-a-thermistor
    # tmp36reading = adc.read_raw(tmp36Pin)
    # tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
    # millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
    # temp_c = (millivolts - 500) / 10
    # print temp_c
    # ph_val = get_ph()
    # print 'ph_val was thoght to be %s' % ph_val
    readings['tankLevel'] = tank # tank level
    readings['photocell'] = photo # photocell
def _resistance_to_celsius(resistance):
    """Convert a thermistor resistance (ohms) to degrees Celsius.

    Shared implementation of the simplified (beta) Steinhart-Hart
    equation, per http://learn.adafruit.com/thermistor/using-a-thermistor.
    Factored out of convert_thermistor/convert_thermistor_special, which
    previously duplicated this math line for line.
    """
    steinhart = resistance / THERMISTORNOMINAL        # (R/Ro)
    steinhart = log(steinhart)                        # ln(R/Ro)
    steinhart /= BCOEFFICIENT                         # 1/B * ln(R/Ro)
    steinhart += 1.0 / (TEMPERATURENOMINAL + 273.15)  # + (1/To)
    steinhart = 1.0 / steinhart                       # Invert
    steinhart -= 273.15                               # convert to C
    print('we think converted temperature is %s' % steinhart)
    return steinhart

def convert_thermistor(raw):
    """Convert a raw ADC reading (1800 mV full scale) from the tank
    thermistor (AIN2) to degrees Celsius."""
    # Solve the voltage divider against the nominal 10k series resistor.
    resistance = SERIESRESISTOR / ((1800.0 / raw) - 1.0)
    print('Thermistor resistance ')
    print(resistance)
    return _resistance_to_celsius(resistance)

def convert_thermistor_special(raw):
    """Convert a raw ADC reading from the bed thermistor (AIN1) to deg C.

    The bed thermistor sits against a 3.73k series resistor (a mislabeled
    part), hence the different divider constant.
    """
    resistance = 3730.0 / ((1800.0 / raw) - 1.0)
    print('Thermistor resistance ')
    print(resistance)
    return _resistance_to_celsius(resistance)
def do_db_update():
    """Post the latest readings to the phant (data.sparkfun.com) stream."""
    print 'db update'
    global readings
    # print readings
    if len(readings) != 0:
        # data.sparkfun.com is expecting:
        # bedTemp, photo, tankLevel, tankTemp
        # NOTE(review): these rounded values are computed but never used;
        # the payload below posts the raw readings instead — confirm intent.
        bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
        tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
        payload = {
            'photo':readings['photocell'],
            'tankLevel':readings['tankLevel'],
            'bedTemp':readings['bedTemp'],
            'tankTemp':readings['tankTemp']
        }
        h = {'Phant-Private-Key':k.key['phant_private']}
        r = requests.post(k.key['phant_url'], data=payload, headers=h)
        print 'wrote a result set to the DB'
    else:
        print 'NULL readings, nothing written to DB'
def get_ph():
    """Read one raw pH value from the probe on UART2 (currently unused)."""
    print 'we are in get_ph'
    uart.setup('UART2')
    ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
    print 'opened serial port'
    # NOTE(review): pyserial opens the port on construction; calling
    # open() again can raise on some versions — verify on target.
    ser.open()
    ser.write('R\r')
    data = ser.read()
    print 'ph received raw as %s' % data
    ser.close()
    uart.cleanup()
    return data
def do_state_display():
    """Render the four current readings on the SSD1306 OLED display."""
    print 'state_display'
    width = disp.width
    height = disp.height
    # 1-bit image buffer matching the display size.
    image = Image.new('1', (width, height))
    # Get drawing object to draw on image.
    draw = ImageDraw.Draw(image)
    # Load default font.
    # font = ImageFont.load_default()
    # Alternatively load a TTF font.
    # Some other nice fonts to try: http://www.dafont.com/bitmap.php
    font = ImageFont.truetype('Vdj.ttf', 8)
    # Draw a black filled box to clear the image.
    draw.rectangle((0,0,width,height), outline=0, fill=0)
    # Draw some shapes.
    # First define some constants to allow easy resizing of shapes.
    padding = 2
    shape_width = 20
    top = padding
    bottom = height-padding
    # Move left to right keeping track of the current x position for drawing shapes.
    x = padding
    # Labels in the left column, values (truncated to 4 chars) on the right.
    draw.text((x, top), 'photo: ', font=font, fill=255)
    draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
    draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
    draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
    draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
    draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
    draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
    draw.text((x+64, top+48), str(readings['bedTemp'])[:4], font=font, fill=255)
    # Draw an ellipse.
    # draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
    # x += shape_width+padding
    # Draw a rectangle.
    # draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
    # x += shape_width+padding
    # Draw a triangle.
    # draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
    # x += shape_width+padding
    # Draw an X.
    # draw.line((x, bottom, x+shape_width, top), fill=255)
    # draw.line((x, top, x+shape_width, bottom), fill=255)
    # x += shape_width+padding
    # Display image.
    disp.image(image)
    disp.display()
def do_pump_toggle():
    """Drive the pump pin: on for the first PUMP_DURATION minutes of each
    hour, off otherwise, and always off between 11pm and 7am."""
    print 'pump actuate'
    '''
    this should actually work like:
    if currentMinute mod PUMP_DURATION < PUMP_INTERVAL:
        activate pump
    else:
        turn off pump
    '''
    if (datetime.datetime.today().hour>6 and datetime.datetime.today().hour<23):
        print 'within actuating timeframe'
        # changed this to just pump for the first PUMP_DURATION minutes every hour
        if(datetime.datetime.today().minute <= PUMP_DURATION):
            print 'we are in the first %s minutes of the hour, so pump should be on.' % PUMP_DURATION
            gpio.output(pumpPin,gpio.HIGH)
        else:
            print 'shutting off pump at %s' % datetime.datetime.today().minute
            gpio.output(pumpPin,gpio.LOW)
    else:
        print 'it is the actuator quiet period, between 11pm and 6am'
        gpio.output(pumpPin,gpio.LOW)
# --- main: one-time hardware setup, then sample/update/display/pump forever.
print 'starting sampling at'
print datetime.datetime.now(tzlocal())
logging.basicConfig(filename='example.log',level=logging.DEBUG)
# adc.setup(thermistor1)
# adc.setup(thermistor2)
# adc.setup(photoPin)
adc.setup()
# uart.setup('UART2')
# print 'uart setup'
gpio.setup(pumpPin,gpio.OUT)
# t = tmp102.TMP102()
# 128x64 OLED on I2C address 0x3D.
disp = ssd.SSD1306_128_64(rst=RST,i2c_address=0x3D)
disp.begin()
disp.clear()
disp.display()
# NOTE
# There is currently a bug in the ADC driver.
# You'll need to read the values twice
# in order to get the latest value.
# pwm.start(greenPin, 10.0, 2000.0)
# pwm.start(redPin, 10.0, 2000.0)
# pwm.start(bluePin, 10.0, 2000.0)
atexit.register(exit_handler)
while True:
    # Each stage is isolated so a single failure doesn't kill the loop.
    try:
        do_sensor_read()
    except Exception, e:
        print e
        print 'sensor_read error!'
    try:
        do_db_update()
    except Exception, e:
        print e
        print 'do_db_update error!'
    try:
        do_state_display()
        # pass
    except Exception, e:
        print e
        print 'do_state_display error!'
    try:
        do_pump_toggle()
    except Exception, e:
        print e
        print 'do_pump_toggle error!'
    print 'done with cycle, now waiting %s' % datetime.datetime.today()
    time.sleep(interval)
| |
"""
kombu.connection
================
Broker connection and pools.
"""
from __future__ import absolute_import
import os
import socket
from contextlib import contextmanager
from itertools import count, cycle
from operator import itemgetter
# jython breaks on relative import for .exceptions for some reason
# (Issue #112)
from kombu import exceptions
from .five import Empty, range, string_t, text_t, LifoQueue as _LifoQueue
from .log import get_logger
from .transport import get_transport_cls, supports_librabbitmq
from .utils import cached_property, retry_over_time, shufflecycle, HashedSeq
from .utils.compat import OrderedDict
from .utils.functional import lazy
from .utils.url import as_url, parse_url, quote, urlparse
__all__ = ['Connection', 'ConnectionPool', 'ChannelPool']
# Transport aliases mapped onto the canonical AMQP transport name.
RESOLVE_ALIASES = {'pyamqp': 'amqp',
                   'librabbitmq': 'amqp'}
# Opt-in connection/channel logging, toggled via environment variables.
_LOG_CONNECTION = os.environ.get('KOMBU_LOG_CONNECTION', False)
_LOG_CHANNEL = os.environ.get('KOMBU_LOG_CHANNEL', False)
logger = get_logger(__name__)
roundrobin_failover = cycle
# Named failover strategies selectable via Connection(failover_strategy=...).
failover_strategies = {
    'round-robin': roundrobin_failover,
    'shuffle': shufflecycle,
}
class Connection(object):
"""A connection to the broker.
:param URL: Broker URL, or a list of URLs, e.g.
.. code-block:: python
Connection('amqp://guest:guest@localhost:5672//')
Connection('amqp://foo;amqp://bar', failover_strategy='round-robin')
Connection('redis://', transport_options={
'visibility_timeout': 3000,
})
import ssl
Connection('amqp://', login_method='EXTERNAL', ssl={
'ca_certs': '/etc/pki/tls/certs/something.crt',
'keyfile': '/etc/something/system.key',
'certfile': '/etc/something/system.cert',
'cert_reqs': ssl.CERT_REQUIRED,
})
.. admonition:: SSL compatibility
SSL currently only works with the py-amqp, amqplib, and qpid
transports. For other transports you can use stunnel.
:keyword hostname: Default host name/address if not provided in the URL.
:keyword userid: Default user name if not provided in the URL.
:keyword password: Default password if not provided in the URL.
:keyword virtual_host: Default virtual host if not provided in the URL.
:keyword port: Default port if not provided in the URL.
:keyword ssl: Use SSL to connect to the server. Default is ``False``.
May not be supported by the specified transport.
:keyword transport: Default transport if not specified in the URL.
:keyword connect_timeout: Timeout in seconds for connecting to the
server. May not be supported by the specified transport.
:keyword transport_options: A dict of additional connection arguments to
pass to alternate kombu channel implementations. Consult the transport
documentation for available options.
:keyword heartbeat: Heartbeat interval in int/float seconds.
Note that if heartbeats are enabled then the :meth:`heartbeat_check`
method must be called regularly, around once per second.
.. note::
The connection is established lazily when needed. If you need the
connection to be established, then force it by calling
:meth:`connect`::
>>> conn = Connection('amqp://')
>>> conn.connect()
and always remember to close the connection::
>>> conn.release()
"""
port = None
virtual_host = '/'
connect_timeout = 5
_closed = None
_connection = None
_default_channel = None
_transport = None
_logger = False
uri_prefix = None
#: The cache of declared entities is per connection,
#: in case the server loses data.
declared_entities = None
#: Iterator returning the next broker URL to try in the event
#: of connection failure (initialized by :attr:`failover_strategy`).
cycle = None
#: Additional transport specific options,
#: passed on to the transport instance.
transport_options = None
#: Strategy used to select new hosts when reconnecting after connection
#: failure. One of "round-robin", "shuffle" or any custom iterator
#: constantly yielding new URLs to try.
failover_strategy = 'round-robin'
#: Heartbeat value, currently only supported by the py-amqp transport.
heartbeat = None
hostname = userid = password = ssl = login_method = None
def __init__(self, hostname='localhost', userid=None,
             password=None, virtual_host=None, port=None, insist=False,
             ssl=False, transport=None, connect_timeout=5,
             transport_options=None, login_method=None, uri_prefix=None,
             heartbeat=0, failover_strategy='round-robin',
             alternates=None, **kwargs):
    """Initialize connection parameters; see the class docstring
    for the meaning of each keyword.  No network I/O happens here."""
    alt = [] if alternates is None else alternates
    # have to spell the args out, just to get nice docstrings :(
    params = self._initial_params = {
        'hostname': hostname, 'userid': userid,
        'password': password, 'virtual_host': virtual_host,
        'port': port, 'insist': insist, 'ssl': ssl,
        'transport': transport, 'connect_timeout': connect_timeout,
        'login_method': login_method, 'heartbeat': heartbeat
    }
    # hostname may be a list/tuple of hosts: first is primary,
    # the rest become failover alternates.
    if hostname and not isinstance(hostname, string_t):
        alt.extend(hostname)
        hostname = alt[0]
    if hostname and '://' in hostname:
        # a semicolon-separated URL list also defines alternates.
        if ';' in hostname:
            alt.extend(hostname.split(';'))
            hostname = alt[0]
        if '+' in hostname[:hostname.index('://')]:
            # e.g. sqla+mysql://root:masterkey@localhost/
            params['transport'], params['hostname'] = \
                hostname.split('+', 1)
            transport = self.uri_prefix = params['transport']
        else:
            transport = transport or urlparse(hostname).scheme
            if not get_transport_cls(transport).can_parse_url:
                # we must parse the URL
                params.update(parse_url(hostname))
            params['transport'] = transport
    self._init_params(**params)

    # fallback hosts
    self.alt = alt
    # failover_strategy may be a known name ('round-robin', 'shuffle')
    # or a custom iterator factory passed through unchanged.
    self.failover_strategy = failover_strategies.get(
        failover_strategy or 'round-robin') or failover_strategy
    if self.alt:
        self.cycle = self.failover_strategy(self.alt)
        next(self.cycle)  # skip first entry

    if transport_options is None:
        transport_options = {}
    self.transport_options = transport_options

    if _LOG_CONNECTION:  # pragma: no cover
        self._logger = True

    if uri_prefix:
        self.uri_prefix = uri_prefix

    self.declared_entities = set()
def switch(self, url):
    """Switch connection parameters to use a new URL (does not
    reconnect)."""
    self.close()
    self.declared_entities.clear()
    self._closed = False
    merged = dict(self._initial_params, **parse_url(url))
    self._init_params(**merged)
def maybe_switch_next(self):
    """Switch to the next URL given by the current failover strategy
    (a no-op when no failover cycle is configured)."""
    cycle = self.cycle
    if cycle:
        self.switch(next(cycle))
def _init_params(self, hostname, userid, password, virtual_host, port,
                 insist, ssl, transport, connect_timeout,
                 login_method, heartbeat):
    """Store connection parameters on the instance, applying defaults."""
    transport = transport or 'amqp'
    # prefer the C librabbitmq transport for amqp when it is installed.
    if transport == 'amqp' and supports_librabbitmq():
        transport = 'librabbitmq'
    self.hostname = hostname
    self.userid = userid
    self.password = password
    self.login_method = login_method
    self.virtual_host = virtual_host or self.virtual_host
    self.port = port or self.port
    self.insist = insist
    self.connect_timeout = connect_timeout
    self.ssl = ssl
    self.transport_cls = transport
    # 0/None disables heartbeats; otherwise normalize to float seconds.
    self.heartbeat = heartbeat and float(heartbeat)
def register_with_event_loop(self, loop):
    """Register this connection's file descriptors with *loop*
    (delegates to the transport)."""
    self.transport.register_with_event_loop(self.connection, loop)
def _debug(self, msg, *args, **kwargs):
    """Emit a debug log line tagged with this connection's id
    (only when connection debug logging is enabled)."""
    if not self._logger:  # pragma: no cover
        return
    template = '[Kombu connection:0x{id:x}] {msg}'
    logger.debug(template.format(id=id(self), msg=text_t(msg)),
                 *args, **kwargs)
def connect(self):
    """Establish connection to server immediately."""
    self._closed = False
    # accessing the property forces the lazy connection to be created.
    return self.connection
def channel(self):
    """Create and return a new channel."""
    self._debug('create channel')
    chan = self.transport.create_channel(self.connection)
    if _LOG_CHANNEL:  # pragma: no cover
        # wrap the channel so every method call is logged.
        from .utils.debug import Logwrapped
        return Logwrapped(chan, 'kombu.channel',
                          '[Kombu channel:{0.channel_id}] ')
    return chan
def heartbeat_check(self, rate=2):
    """Allow the transport to perform any periodic tasks
    required to make heartbeats work. This should be called
    approximately every second.

    If the current transport does not support heartbeats then
    this is a noop operation.

    :keyword rate: Rate is how often the tick is called
        compared to the actual heartbeat value. E.g. if
        the heartbeat is set to 3 seconds, and the tick
        is called every 3 / 2 seconds, then the rate is 2.
        This value is currently unused by any transports.
    """
    return self.transport.heartbeat_check(self.connection, rate=rate)
def drain_events(self, **kwargs):
    """Wait for a single event from the server.

    :keyword timeout: Timeout in seconds before we give up.

    :raises :exc:`socket.timeout`: if the timeout is exceeded.
    """
    return self.transport.drain_events(self.connection, **kwargs)
def maybe_close_channel(self, channel):
    """Close *channel*, ignoring any connection or channel errors."""
    ignorable = self.connection_errors + self.channel_errors
    try:
        channel.close()
    except ignorable:
        pass
def _do_close_self(self):
    # Close only connection and channel(s), but not transport.
    self.declared_entities.clear()
    if self._default_channel:
        self.maybe_close_channel(self._default_channel)
    if self._connection:
        try:
            self.transport.close_connection(self._connection)
        except self.connection_errors + (AttributeError, socket.error):
            # best-effort close: the socket may already be gone.
            pass
        self._connection = None
def _close(self):
    """Really close connection, even if part of a connection pool."""
    self._do_close_self()
    if self._transport:
        # break the client<->transport reference cycle.
        self._transport.client = None
        self._transport = None
    self._debug('closed')
    self._closed = True
def collect(self, socket_timeout=None):
    """Drop references to the underlying connection without the
    normal close handshake (fast cleanup, e.g. after fork)."""
    # amqp requires communication to close, we don't need that just
    # to clear out references, Transport._collect can also be implemented
    # by other transports that want fast after fork
    try:
        gc_transport = self._transport._collect
    except AttributeError:
        # transport has no fast path: do a real close, but bound the
        # time we may block on the socket by socket_timeout.
        _timeo = socket.getdefaulttimeout()
        socket.setdefaulttimeout(socket_timeout)
        try:
            self._close()
        except socket.timeout:
            pass
        finally:
            socket.setdefaulttimeout(_timeo)
    else:
        gc_transport(self._connection)
    if self._transport:
        self._transport.client = None
        self._transport = None
    self.declared_entities.clear()
    self._connection = None
def release(self):
    """Close the connection (if open)."""
    self._close()
# ``close`` is an alias for :meth:`release`.
close = release
def ensure_connection(self, errback=None, max_retries=None,
                      interval_start=2, interval_step=2, interval_max=30,
                      callback=None):
    """Ensure we have a connection to the server.

    If not retry establishing the connection with the settings
    specified.

    :keyword errback: Optional callback called each time the connection
      can't be established. Arguments provided are the exception
      raised and the interval that will be slept ``(exc, interval)``.

    :keyword max_retries: Maximum number of times to retry.
      If this limit is exceeded the connection error will be re-raised.

    :keyword interval_start: The number of seconds we start sleeping for.
    :keyword interval_step: How many seconds added to the interval
      for each retry.
    :keyword interval_max: Maximum number of seconds to sleep between
      each retry.
    :keyword callback: Optional callback that is called for every
      internal iteration (1 s)
    """
    def on_error(exc, intervals, retries, interval=0):
        # only advance the sleep interval once per full cycle of
        # alternate hosts; between hosts we retry immediately.
        round = self.completes_cycle(retries)
        if round:
            interval = next(intervals)
        if errback:
            errback(exc, interval)
        self.maybe_switch_next()  # select next host

        return interval if round else 0

    retry_over_time(self.connect, self.recoverable_connection_errors,
                    (), {}, on_error, max_retries,
                    interval_start, interval_step, interval_max, callback)
    return self
def completes_cycle(self, retries):
    """Return true if the cycle is complete after number of `retries`."""
    if not self.alt:
        return True
    return (retries + 1) % len(self.alt) == 0
def revive(self, new_channel):
    """Revive connection after connection re-established."""
    stale = self._default_channel
    if stale:
        # the old default channel belongs to the dead connection.
        self.maybe_close_channel(stale)
        self._default_channel = None
def _default_ensure_callback(self, exc, interval):
    """Default errback for :meth:`ensure`: log the error and retry delay."""
    logger.error("Ensure: Operation error: %r. Retry in %ss",
                 exc, interval, exc_info=True)
def ensure(self, obj, fun, errback=None, max_retries=None,
           interval_start=1, interval_step=1, interval_max=1,
           on_revive=None):
    """Ensure operation completes, regardless of any channel/connection
    errors occurring.

    Will retry by establishing the connection, and reapplying
    the function.

    :param fun: Method to apply.

    :keyword errback: Optional callback called each time the connection
      can't be established. Arguments provided are the exception
      raised and the interval that will be slept ``(exc, interval)``.

    :keyword max_retries: Maximum number of times to retry.
      If this limit is exceeded the connection error will be re-raised.

    :keyword interval_start: The number of seconds we start sleeping for.
    :keyword interval_step: How many seconds added to the interval
      for each retry.
    :keyword interval_max: Maximum number of seconds to sleep between
      each retry.

    **Example**

    This is an example ensuring a publish operation::

        >>> from kombu import Connection, Producer
        >>> conn = Connection('amqp://')
        >>> producer = Producer(conn)

        >>> def errback(exc, interval):
        ...     logger.error('Error: %r', exc, exc_info=1)
        ...     logger.info('Retry in %s seconds.', interval)

        >>> publish = conn.ensure(producer, producer.publish,
        ...                       errback=errback, max_retries=3)
        >>> publish({'hello': 'world'}, routing_key='dest')
    """
    def _ensured(*args, **kwargs):
        got_connection = 0
        conn_errors = self.recoverable_connection_errors
        chan_errors = self.recoverable_channel_errors
        has_modern_errors = hasattr(
            self.transport, 'recoverable_connection_errors',
        )
        for retries in count(0):  # for infinity
            try:
                return fun(*args, **kwargs)
            except conn_errors as exc:
                if got_connection and not has_modern_errors:
                    # transport can not distinguish between
                    # recoverable/irrecoverable errors, so we propagate
                    # the error if it persists after a new connection was
                    # successfully established.
                    raise
                if max_retries is not None and retries > max_retries:
                    raise
                self._debug('ensure connection error: %r', exc, exc_info=1)
                self._connection = None
                self._do_close_self()
                errback and errback(exc, 0)
                remaining_retries = None
                if max_retries is not None:
                    remaining_retries = max(max_retries - retries, 1)
                # reconnect, then revive both ourselves and the target
                # object on the fresh channel before retrying fun().
                self.ensure_connection(errback,
                                       remaining_retries,
                                       interval_start,
                                       interval_step,
                                       interval_max)
                new_channel = self.channel()
                self.revive(new_channel)
                obj.revive(new_channel)
                if on_revive:
                    on_revive(new_channel)
                got_connection += 1
            except chan_errors as exc:
                # channel errors are recoverable without reconnecting.
                if max_retries is not None and retries > max_retries:
                    raise
                self._debug('ensure channel error: %r', exc, exc_info=1)
                errback and errback(exc, 0)
    _ensured.__name__ = "%s(ensured)" % fun.__name__
    _ensured.__doc__ = fun.__doc__
    _ensured.__module__ = fun.__module__
    return _ensured
def autoretry(self, fun, channel=None, **ensure_options):
    """Decorator for functions supporting a ``channel`` keyword argument.

    The resulting callable will retry calling the function if
    it raises connection or channel related errors.
    The return value will be a tuple of ``(retval, last_created_channel)``.

    If a ``channel`` is not provided, then one will be automatically
    acquired (remember to close it afterwards).

    See :meth:`ensure` for the full list of supported keyword arguments.

    Example usage::

        channel = connection.channel()
        try:
            ret, channel = connection.autoretry(publish_messages, channel)
        finally:
            channel.close()
    """
    # the channel is kept in a one-element list so the closure in
    # Revival can rebind it on revive.
    channels = [channel]
    create_channel = self.channel

    class Revival(object):
        # mimic fun's metadata so ensure() can copy __name__ etc.
        __name__ = getattr(fun, '__name__', None)
        __module__ = getattr(fun, '__module__', None)
        __doc__ = getattr(fun, '__doc__', None)

        def revive(self, channel):
            channels[0] = channel

        def __call__(self, *args, **kwargs):
            if channels[0] is None:
                self.revive(create_channel())
            return fun(*args, channel=channels[0], **kwargs), channels[0]

    revive = Revival()
    return self.ensure(revive, revive, **ensure_options)
def create_transport(self):
    """Instantiate the transport class for this connection."""
    return self.get_transport_cls()(client=self)
def get_transport_cls(self):
    """Get the currently used transport class."""
    cls = self.transport_cls
    # an alias string (or nothing) must be resolved to an actual class.
    if not cls or isinstance(cls, string_t):
        cls = get_transport_cls(cls)
    return cls
def clone(self, **kwargs):
    """Create a copy of the connection with the same connection
    settings."""
    # kwargs override the copied settings.
    return self.__class__(**dict(self._info(resolve=False), **kwargs))
def get_heartbeat_interval(self):
    """Return the negotiated heartbeat interval (transport-specific)."""
    return self.transport.get_heartbeat_interval(self.connection)
def _info(self, resolve=True):
    """Return connection settings as an ordered tuple of pairs.

    :keyword resolve: When true, resolve transport aliases to the
        canonical transport name.
    """
    transport_cls = self.transport_cls
    if resolve:
        transport_cls = RESOLVE_ALIASES.get(transport_cls, transport_cls)
    # D holds transport-provided defaults for unset values.
    D = self.transport.default_connection_params
    hostname = self.hostname or D.get('hostname')
    if self.uri_prefix:
        hostname = '%s+%s' % (self.uri_prefix, hostname)
    info = (
        ('hostname', hostname),
        ('userid', self.userid or D.get('userid')),
        ('password', self.password or D.get('password')),
        ('virtual_host', self.virtual_host or D.get('virtual_host')),
        ('port', self.port or D.get('port')),
        ('insist', self.insist),
        ('ssl', self.ssl),
        ('transport', transport_cls),
        ('connect_timeout', self.connect_timeout),
        ('transport_options', self.transport_options),
        ('login_method', self.login_method or D.get('login_method')),
        ('uri_prefix', self.uri_prefix),
        ('heartbeat', self.heartbeat),
        ('alternates', self.alt),
    )
    return info
def info(self):
    """Get connection info."""
    return OrderedDict(self._info())
def __eqhash__(self):
    """Hash over the identifying connection parameters, used for
    equality-aware pooling/caching."""
    return HashedSeq(self.transport_cls, self.hostname, self.userid,
                     self.password, self.virtual_host, self.port,
                     repr(self.transport_options))
def as_uri(self, include_password=False, mask='**',
           getfields=itemgetter('port', 'userid', 'password',
                                'virtual_host', 'transport')):
    """Convert connection parameters to URL form.

    :keyword include_password: Include the real password in the
        result instead of the *mask* string.
    :keyword mask: String used in place of the password when it
        is sanitized.
    """
    hostname = self.hostname or 'localhost'
    if self.transport.can_parse_url:
        # The transport keeps the full URL verbatim in hostname;
        # just re-apply the uri prefix if one was used.
        if self.uri_prefix:
            return '%s+%s' % (self.uri_prefix, hostname)
        # BUGFIX: previously returned self.hostname, which can be
        # None; return the defaulted value computed above instead.
        return hostname
    fields = self.info()
    port, userid, password, vhost, transport = getfields(fields)
    scheme = ('{0}+{1}'.format(self.uri_prefix, transport)
              if self.uri_prefix else transport)
    return as_url(
        scheme, hostname, port, userid, password, quote(vhost),
        sanitize=not include_password, mask=mask,
    )
def Pool(self, limit=None, preload=None):
    """Pool of connections.

    See :class:`ConnectionPool`.

    :keyword limit: Maximum number of active connections.
      Default is no limit.
    :keyword preload: Number of connections to preload
      when the pool is created. Default is 0.

    *Example usage*::

        >>> connection = Connection('amqp://')
        >>> pool = connection.Pool(2)
        >>> c1 = pool.acquire()
        >>> c2 = pool.acquire()
        >>> c3 = pool.acquire()
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
          File "kombu/connection.py", line 354, in acquire
          raise ConnectionLimitExceeded(self.limit)
            kombu.exceptions.ConnectionLimitExceeded: 2
        >>> c1.release()
        >>> c3 = pool.acquire()
    """
    return ConnectionPool(self, limit, preload)
def ChannelPool(self, limit=None, preload=None):
    """Pool of channels.

    See :class:`ChannelPool`.

    :keyword limit: Maximum number of active channels.
      Default is no limit.
    :keyword preload: Number of channels to preload
      when the pool is created. Default is 0.

    *Example usage*::

        >>> connection = Connection('amqp://')
        >>> pool = connection.ChannelPool(2)
        >>> c1 = pool.acquire()
        >>> c2 = pool.acquire()
        >>> c3 = pool.acquire()
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
          File "kombu/connection.py", line 354, in acquire
          raise ChannelLimitExceeded(self.limit)
            kombu.connection.ChannelLimitExceeded: 2
        >>> c1.release()
        >>> c3 = pool.acquire()
    """
    return ChannelPool(self, limit, preload)
def Producer(self, channel=None, *args, **kwargs):
    """Create new :class:`kombu.Producer` instance using this
    connection."""
    from .messaging import Producer
    return Producer(channel or self, *args, **kwargs)
def Consumer(self, queues=None, channel=None, *args, **kwargs):
    """Create new :class:`kombu.Consumer` instance using this
    connection."""
    from .messaging import Consumer
    return Consumer(channel or self, queues, *args, **kwargs)
def SimpleQueue(self, name, no_ack=None, queue_opts=None,
                exchange_opts=None, channel=None, **kwargs):
    """Create new :class:`~kombu.simple.SimpleQueue`, using a channel
    from this connection.

    If ``name`` is a string, a queue and exchange will be automatically
    created using that name as the name of the queue and exchange,
    also it will be used as the default routing key.

    :param name: Name of the queue/or a :class:`~kombu.Queue`.
    :keyword no_ack: Disable acknowledgements. Default is false.
    :keyword queue_opts: Additional keyword arguments passed to the
      constructor of the automatically created
      :class:`~kombu.Queue`.
    :keyword exchange_opts: Additional keyword arguments passed to the
      constructor of the automatically created
      :class:`~kombu.Exchange`.
    :keyword channel: Custom channel to use. If not specified the
        connection default channel is used.
    """
    from .simple import SimpleQueue
    return SimpleQueue(channel or self, name, no_ack, queue_opts,
                       exchange_opts, **kwargs)
def SimpleBuffer(self, name, no_ack=None, queue_opts=None,
                 exchange_opts=None, channel=None, **kwargs):
    """Create new :class:`~kombu.simple.SimpleQueue` using a channel
    from this connection.

    Same as :meth:`SimpleQueue`, but configured with buffering
    semantics. The resulting queue and exchange will not be durable, also
    auto delete is enabled. Messages will be transient (not persistent),
    and acknowledgements are disabled (``no_ack``).
    """
    from .simple import SimpleBuffer
    return SimpleBuffer(channel or self, name, no_ack, queue_opts,
                        exchange_opts, **kwargs)
def _establish_connection(self):
    """Ask the transport to open the real network connection."""
    self._debug('establishing connection...')
    conn = self.transport.establish_connection()
    self._debug('connection established: %r', conn)
    return conn
def __repr__(self):
    """``x.__repr__() <==> repr(x)``"""
    return '<Connection: {0} at 0x{1:x}>'.format(self.as_uri(), id(self))
def __copy__(self):
    """``x.__copy__() <==> copy(x)``"""
    return self.clone()
def __reduce__(self):
    """Support pickling by reconstructing from the info() values."""
    return self.__class__, tuple(self.info().values()), None
def __enter__(self):
    """Context manager entry: return the connection itself."""
    return self
def __exit__(self, *args):
    """Context manager exit: release the connection."""
    self.release()
@property
def qos_semantics_matches_spec(self):
    """True if the transport's QoS semantics follow the AMQP spec
    (delegated to the transport)."""
    return self.transport.qos_semantics_matches_spec(self.connection)
@property
def connected(self):
    """Return true if the connection has been established."""
    return (not self._closed and
            self._connection is not None and
            self.transport.verify_connection(self._connection))
@property
def connection(self):
    """The underlying connection object.

    .. warning::
        This instance is transport specific, so do not
        depend on the interface of this object.
    """
    if not self._closed:
        if not self.connected:
            # (re)establish: reset per-connection caches first.
            self.declared_entities.clear()
            self._default_channel = None
            self._connection = self._establish_connection()
            self._closed = False
        return self._connection
    # NOTE: returns None when the connection has been closed.
@property
def default_channel(self):
    """Default channel, created upon access and closed when the connection
    is closed.

    Can be used for automatic channel handling when you only need one
    channel, and also it is the channel implicitly used if a connection
    is passed instead of a channel, to functions that require a channel.
    """
    # make sure we're still connected, and if not refresh.
    self.connection
    if self._default_channel is None:
        self._default_channel = self.channel()
    return self._default_channel
@property
def host(self):
    """The host as a host name/port pair separated by colon."""
    name, port = self.hostname, self.port
    return ':'.join([name, str(port)])
@property
def transport(self):
    """The transport instance, created on first access."""
    if self._transport is None:
        self._transport = self.create_transport()
    return self._transport
@cached_property
def manager(self):
    """Experimental manager that can be used to manage/monitor the broker
    instance. Not available for all transports."""
    return self.transport.manager
def get_manager(self, *args, **kwargs):
    """Return a broker manager instance (transport-specific)."""
    return self.transport.get_manager(*args, **kwargs)
@cached_property
def recoverable_connection_errors(self):
    """List of connection related exceptions that can be recovered from,
    but where the connection must be closed and re-established first."""
    try:
        return self.transport.recoverable_connection_errors
    except AttributeError:
        # There were no such classification before,
        # and all errors were assumed to be recoverable,
        # so this is a fallback for transports that do
        # not support the new recoverable/irrecoverable classes.
        return self.connection_errors + self.channel_errors
@cached_property
def recoverable_channel_errors(self):
    """List of channel related exceptions that can be automatically
    recovered from without re-establishing the connection."""
    try:
        return self.transport.recoverable_channel_errors
    except AttributeError:
        # older transports define no such classification.
        return ()
@cached_property
def connection_errors(self):
    """List of exceptions that may be raised by the connection."""
    return self.transport.connection_errors
@cached_property
def channel_errors(self):
    """List of exceptions that may be raised by the channel."""
    return self.transport.channel_errors
@property
def supports_heartbeats(self):
    """True if the current transport supports heartbeats."""
    return self.transport.supports_heartbeats
@property
def is_evented(self):
    """True if the transport can be registered with an event loop."""
    return self.transport.supports_ev
# Backwards-compatible alias for the old class name.
BrokerConnection = Connection
class Resource(object):
    """Abstract base for bounded pools of reusable resources.

    Subclasses implement :meth:`setup` (initial fill) and ``new``
    (create one resource); see :class:`ConnectionPool` and
    :class:`ChannelPool` below.
    """
    LimitExceeded = exceptions.LimitExceeded

    def __init__(self, limit=None, preload=None):
        # limit: max number of resources that may exist at once
        # (None/0 means unbounded: resources are created per acquire).
        self.limit = limit
        self.preload = preload or 0
        self._closed = False
        # LIFO so that recently released (warm) resources are reused first.
        self._resource = _LifoQueue()
        # resources currently handed out to callers.
        self._dirty = set()
        self.setup()

    def setup(self):
        raise NotImplementedError('subclass responsibility')

    def _add_when_empty(self):
        if self.limit and len(self._dirty) >= self.limit:
            raise self.LimitExceeded(self.limit)
        # All taken, put new on the queue and
        # try get again, this way the first in line
        # will get the resource.
        self._resource.put_nowait(self.new())

    def acquire(self, block=False, timeout=None):
        """Acquire resource.

        :keyword block: If the limit is exceeded,
          block until there is an available item.
        :keyword timeout: Timeout to wait
          if ``block`` is true. Default is :const:`None` (forever).
        :raises LimitExceeded: if block is false
          and the limit has been exceeded.
        """
        if self._closed:
            raise RuntimeError('Acquire on closed pool')
        if self.limit:
            while 1:
                try:
                    R = self._resource.get(block=block, timeout=timeout)
                except Empty:
                    self._add_when_empty()
                else:
                    try:
                        R = self.prepare(R)
                    except BaseException:
                        if isinstance(R, lazy):
                            # no evaluated yet, just put it back
                            self._resource.put_nowait(R)
                        else:
                            # evaluted so must try to release/close first.
                            self.release(R)
                        raise
                    self._dirty.add(R)
                    break
        else:
            # unbounded pool: just create and prepare a new resource.
            R = self.prepare(self.new())

        def release():
            """Release resource so it can be used by another thread.

            The caller is responsible for discarding the object,
            and to never use the resource again.  A new resource must
            be acquired if so needed.
            """
            self.release(R)
        # attach a convenience bound release to the resource itself.
        R.release = release

        return R

    def prepare(self, resource):
        # hook: make a queued resource ready for use (e.g. evaluate lazy).
        return resource

    def close_resource(self, resource):
        resource.close()

    def release_resource(self, resource):
        # hook: called after a resource is returned to the queue.
        pass

    def replace(self, resource):
        """Replace resource with a new instance.  This can be used in case
        of defective resources."""
        if self.limit:
            self._dirty.discard(resource)
        self.close_resource(resource)

    def release(self, resource):
        if self.limit:
            self._dirty.discard(resource)
            self._resource.put_nowait(resource)
            self.release_resource(resource)
        else:
            # unbounded pool never re-queues: just close.
            self.close_resource(resource)

    def collect_resource(self, resource):
        # hook: fast cleanup used by force_close_all.
        pass

    def force_close_all(self):
        """Close and remove all resources in the pool (also those in use).

        Can be used to close resources from parent processes
        after fork (e.g. sockets/connections).
        """
        self._closed = True
        dirty = self._dirty
        resource = self._resource
        while 1:  # - acquired
            try:
                dres = dirty.pop()
            except KeyError:
                break
            try:
                self.collect_resource(dres)
            except AttributeError:  # Issue #78
                pass
        while 1:  # - available
            # deque supports '.clear', but lists do not, so for that
            # reason we use pop here, so that the underlying object can
            # be any object supporting '.pop' and '.append'.
            try:
                res = resource.queue.pop()
            except IndexError:
                break
            try:
                self.collect_resource(res)
            except AttributeError:
                pass  # Issue #78

    if os.environ.get('KOMBU_DEBUG_POOL'):  # pragma: no cover
        # Debug instrumentation, enabled at import time by the
        # KOMBU_DEBUG_POOL environment variable: prints and tracks
        # resource ids on every acquire/release.
        _orig_acquire = acquire
        _orig_release = release

        _next_resource_id = 0

        def acquire(self, *args, **kwargs):  # noqa
            import traceback
            id = self._next_resource_id = self._next_resource_id + 1
            print('+{0} ACQUIRE {1}'.format(id, self.__class__.__name__))
            r = self._orig_acquire(*args, **kwargs)
            r._resource_id = id
            print('-{0} ACQUIRE {1}'.format(id, self.__class__.__name__))
            if not hasattr(r, 'acquired_by'):
                r.acquired_by = []
            r.acquired_by.append(traceback.format_stack())
            return r

        def release(self, resource):  # noqa
            id = resource._resource_id
            print('+{0} RELEASE {1}'.format(id, self.__class__.__name__))
            r = self._orig_release(resource)
            print('-{0} RELEASE {1}'.format(id, self.__class__.__name__))
            self._next_resource_id -= 1
            return r
class ConnectionPool(Resource):
    """Pool of :class:`Connection` clones (see :meth:`Connection.Pool`)."""
    LimitExceeded = exceptions.ConnectionLimitExceeded

    def __init__(self, connection, limit=None, preload=None):
        # the template connection that new pool members are cloned from.
        self.connection = connection
        super(ConnectionPool, self).__init__(limit=limit,
                                             preload=preload)

    def new(self):
        return self.connection.clone()

    def release_resource(self, resource):
        try:
            resource._debug('released')
        except AttributeError:
            pass

    def close_resource(self, resource):
        resource._close()

    def collect_resource(self, resource, socket_timeout=0.1):
        # fast teardown with a short socket timeout (used after fork).
        return resource.collect(socket_timeout)

    @contextmanager
    def acquire_channel(self, block=False):
        """Context manager yielding ``(connection, default_channel)``."""
        with self.acquire(block=block) as connection:
            yield connection, connection.default_channel

    def setup(self):
        if self.limit:
            for i in range(self.limit):
                if i < self.preload:
                    # preloaded members are connected eagerly.
                    conn = self.new()
                    conn.connect()
                else:
                    # the rest are created lazily on first acquire.
                    conn = lazy(self.new)
                self._resource.put_nowait(conn)

    def prepare(self, resource):
        if callable(resource):
            # evaluate a lazy() placeholder into a real connection.
            resource = resource()
        resource._debug('acquired')
        return resource
class ChannelPool(Resource):
    """Pool of channels (see :meth:`Connection.ChannelPool`)."""
    LimitExceeded = exceptions.ChannelLimitExceeded

    def __init__(self, connection, limit=None, preload=None):
        # connection whose channel() factory backs this pool.
        self.connection = connection
        super(ChannelPool, self).__init__(limit=limit,
                                          preload=preload)

    def new(self):
        return lazy(self.connection.channel)

    def setup(self):
        channel = self.new()
        if self.limit:
            for i in range(self.limit):
                # BUGFIX: was ``i < self.preload and channel() or
                # lazy(channel)`` -- the ``and/or`` ternary idiom
                # falls through to lazy(channel) whenever channel()
                # evaluates falsy.  Use a real conditional expression:
                # preloaded slots get an evaluated channel, the rest a
                # lazy placeholder evaluated on first acquire.
                self._resource.put_nowait(
                    channel() if i < self.preload else lazy(channel))

    def prepare(self, channel):
        if callable(channel):
            # evaluate a lazy() placeholder into a real channel.
            channel = channel()
        return channel
def maybe_channel(channel):
    """Return the default channel if *channel* is a connection
    instance, otherwise return *channel* unchanged."""
    if not isinstance(channel, Connection):
        return channel
    return channel.default_channel
def is_connection(obj):
    """Return True if *obj* is a :class:`Connection` instance."""
    return isinstance(obj, Connection)
| |
# -*- coding: utf-8 -*-
import mimetypes
from os.path import join
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from djspace.application.models import *
from djspace.core.forms import EmailApplicantsForm
from djspace.core.forms import PhotoForm
from djspace.core.forms import UserFilesForm
from djspace.core.models import UserFiles
from djspace.core.utils import files_status
from djspace.dashboard.views import UPLOAD_FORMS
from djtools.utils.mail import send_mail
@staff_member_required
def sendmail(request):
    """Send emails to program applicants from admin action email_applicants.

    Expects a POST from the admin action with the EmailApplicantsForm
    fields plus a ``pids[]`` list of application object ids; always
    redirects back to the referring admin page.
    """
    redirect = request.META['HTTP_REFERER']
    if request.POST:
        form = EmailApplicantsForm(request.POST, use_required_attribute=False)
        # BUGFIX: the validity result was previously ignored and
        # cleaned_data was read unconditionally, raising on bad input.
        if form.is_valid():
            cd = form.cleaned_data
            # content type of the application model
            ct = ContentType.objects.get_for_id(cd['content_type'])
            # program ids
            pids = request.POST.getlist('pids[]')
            # email subject
            sub = "WSGC: Information about your {0} application".format(
                cd['title'],
            )
            bcc = [request.user.email, settings.SERVER_MAIL]
            for pid in pids:
                instance = ct.get_object_for_this_type(pk=pid)
                to = [instance.user.email]
                send_mail(
                    request,
                    to,
                    sub,
                    settings.SERVER_EMAIL,
                    'admin/email_data.html',
                    {'obj': instance, 'content': cd.get('content')},
                    bcc,
                )
            messages.add_message(
                request,
                messages.SUCCESS,
                'Your message was sent successfully.',
                extra_tags='success',
            )
        else:
            messages.add_message(
                request,
                messages.ERROR,
                'Invalid form submission: {0}'.format(form.errors),
                extra_tags='danger',
            )
    return HttpResponseRedirect(redirect)
@csrf_exempt
@login_required
def photo_upload(request):
    """AJAX POST for uploading a photo for any given application.

    Expects ``content_type`` (ContentType pk) and ``oid`` (object pk)
    in the POST data; attaches the uploaded photo to that object and
    returns the rendered photo partial, or a plain-text failure message.
    """
    # NOTE(review): request.is_ajax() is removed in Django 4.0 --
    # confirm the Django version before upgrading.
    response = None
    if request.is_ajax() and request.method == 'POST':
        form = PhotoForm(
            data=request.POST, files=request.FILES, use_required_attribute=False,
        )
        if form.is_valid():
            ct = request.POST.get('content_type')
            oid = request.POST.get('oid')
            if ct and oid:
                ct = ContentType.objects.get(pk=ct)
                mod = ct.model_class()
                try:
                    instance = mod.objects.get(pk=oid)
                    # attach the photo to the target object via the
                    # generic relation before saving.
                    phile = form.save(commit=False)
                    phile.content_object = instance
                    phile.save()
                    response = render(
                        request,
                        'dashboard/view_photo.ajax.html',
                        {'photo': phile, 'ct': ct, 'oid': oid},
                    )
                except Exception as error:
                    msg = "Fail: {0}".format(str(error))
            else:
                msg = "Fail: No Content Type or Object ID Provided"
        else:
            msg = "Fail: {0}".format(form.errors)
    else:
        msg = "AJAX POST required"
    if not response:
        response = HttpResponse(msg, content_type='text/plain; charset=utf-8')
    return response
@csrf_exempt
@login_required
def user_files(request):
    """Update user files via ajax post.

    Two modes: when ``content_type`` is posted, the upload targets an
    application object (with a manager-permission check); otherwise it
    targets the user's own :class:`UserFiles` profile record.  On
    success a notification email is sent to WSGC and the rendered file
    partial is returned; otherwise a plain-text failure message.
    """
    user = request.user
    response = None
    if request.method == 'POST':
        ct = request.POST.get('content_type')
        if ct:
            ct = ContentType.objects.get(pk=ct)
            mod = ct.model_class()
            instance = mod.objects.get(pk=request.POST.get('oid'))
            # team leaders, co-advisors, and grants officers can upload files
            # for rocket launch teams and professional programs
            manager = False
            try:
                goid = instance.grants_officer.id
            except Exception:
                goid = None
            try:
                coid = instance.co_advisor.id
            except Exception:
                coid = None
            if ct.model == 'rocketlaunchteam':
                if instance.leader.id == user.id or goid == user.id or coid == user.id:
                    manager = True
            if ct.model in PROFESSIONAL_PROGRAMS:
                if goid == user.id:
                    manager = True
            # is someone being naughty?
            if instance.user != user and not manager:
                return HttpResponse(
                    "Something is rotten in Denmark",
                    content_type='text/plain; charset=utf-8',
                )
            else:
                form = UPLOAD_FORMS[ct.model](
                    data=request.POST,
                    files=request.FILES,
                    instance=instance,
                    use_required_attribute=False,
                )
        else:
            # profile-file mode: update (or create) the user's own record.
            try:
                instance = UserFiles.objects.get(user=user)
            except Exception:
                instance = None
            form = UserFilesForm(
                data=request.POST,
                files=request.FILES,
                instance=instance,
                use_required_attribute=False,
            )
        field_name = request.POST.get('field_name')
        if form.is_valid():
            if field_name:
                msg = "Success"
                phile = form.save(commit=False)
                if not ct:
                    phile.user = user
                phile.save()
                # URL of the freshly uploaded file field.
                earl = getattr(phile, field_name)
                # notify wsgc that a user uploaded one of her profile files
                if settings.DEBUG:
                    to = [settings.ADMINS[0][1]]
                else:
                    to = [settings.WSGC_EMAIL]
                subject = "[File Upload] {0}: {1}, {2}".format(
                    field_name, user.last_name, user.first_name,
                )
                bcc = [settings.SERVER_MAIL]
                send_mail(
                    request,
                    to,
                    subject,
                    user.email,
                    'dashboard/email_file_uploaded.html',
                    {
                        'earl': earl.url,
                        'obj': phile,
                        'field_name': field_name,
                        'userfiles': [
                            'mugshot', 'biography', 'irs_w9', 'media_release',
                        ],
                    },
                    bcc,
                )
                response = render(
                    request,
                    'dashboard/view_file.ajax.html',
                    {'earl': earl.url, 'field_name': field_name},
                )
            else:
                msg = "Fail: Field name is missing"
        else:
            msg = "Fail: {0}".format(form.errors)
    else:
        msg = "POST required"
    if not response:
        response = HttpResponse(msg, content_type='text/plain; charset=utf-8')
    return response
@csrf_exempt
@login_required
def check_files_status(request):
    """Report whether the user has uploaded all required files.

    Method: ajax post.
    Return: plain-text "True"/"False" (or "POST required").
    """
    if request.method != 'POST':
        result = "POST required"
    else:
        result = files_status(request.user)
    return HttpResponse(result, content_type='text/plain; charset=utf-8')
@staff_member_required
def download_file(request, field, ct, oid, uid):
    """Download a file with a name that matches the program.

    :param field: Which user file to serve (key of ``files`` below).
    :param ct: ContentType pk of the application model.
    :param oid: Application object pk (used to build the filename).
    :param uid: User pk whose file is downloaded.
    """
    # maps a UserFiles field name to its label used in the filename.
    files = {
        'mugshot': 'Photo',
        'biography': 'Bio',
        'irs_w9': 'W9',
        'media_release': 'Media_Release',
    }
    lackey = None
    if request.GET.get('lackey'):
        lackey = request.GET['lackey']
    user = User.objects.get(pk=uid)
    # NOTE(review): attr is None when the field has no file; join()
    # would then raise -- confirm callers only link existing files.
    attr = getattr(user.user_files, field, None)
    path = join(settings.MEDIA_ROOT, attr.name)
    extension = path.split('.')[-1]
    ct = ContentType.objects.get(pk=ct)
    mod = ct.model_class()
    instance = mod.objects.get(pk=oid)
    filename = '{0}_{1}.{2}'.format(
        instance.get_file_name(lackey=lackey), files[field], extension,
    )
    with open(path, 'rb') as phile:
        mime_type, _ = mimetypes.guess_type(path)
        response = HttpResponse(phile, content_type=mime_type)
        response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
    return response
@csrf_exempt
def user_files_test(request):
    """Test view for the user file upload form.

    GET renders an unbound form for the target instance; POST validates
    and saves the upload. Renders 'dashboard/test.html' with the form and
    a 'valid' flag ('get', 'yes', 'no', or the form errors).
    """
    # both branches need the same content-type/instance lookup;
    # do it once from whichever query dict applies
    data = request.POST if request.method == 'POST' else request.GET
    ct = ContentType.objects.get(pk=data.get('ct'))
    instance = ct.model_class().objects.get(pk=data.get('oid'))
    if request.method == 'POST':
        valid = 'no'
        form = UPLOAD_FORMS[ct.model](
            data=request.POST,
            files=request.FILES,
            instance=instance,
            use_required_attribute=False,
        )
        if form.is_valid():
            form.save()
            valid = 'yes'
        else:
            valid = form.errors
    else:
        valid = 'get'
        form = UPLOAD_FORMS[ct.model](
            instance=instance, use_required_attribute=False,
        )
    return render(
        request, 'dashboard/test.html', {'form': form, 'valid': valid},
    )
@csrf_exempt
@login_required
def object_delete(request):
    """AJAX POST for deleting arbitrary objects.

    Expects 'oid' (object ID) and 'cid' (content type ID) in the POST
    data. Deletion is allowed for superusers, or when the object ID
    matches the requesting user's own ID. Returns a plain-text status.
    """
    user = request.user
    if request.is_ajax() and request.method == 'POST':
        # the original nested try/except blocks all produced the same
        # "Fail: <error>" message, so one handler covers every step:
        # int() conversion, ContentType lookup, and instance fetch/delete
        try:
            # object ID
            oid = int(request.POST.get('oid'))
            # content type ID
            cid = int(request.POST.get('cid'))
            ct = ContentType.objects.get(pk=cid)
            instance = ct.model_class().objects.get(pk=oid)
            # is someone doing something nefarious?
            if oid == user.id or user.is_superuser:
                instance.delete()
                msg = "Success"
            else:
                msg = "Fail: Inadequate Permissions"
        except Exception as error:
            msg = "Fail: {0}".format(str(error))
    else:
        msg = "AJAX POST required"
    return HttpResponse(msg, content_type="text/plain; charset=utf-8")
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
    """Exercise core wallet RPCs on a small regtest network: balances,
    sends (with and without subtract-fee), output locking, rebroadcast,
    -walletbroadcast, watch-only imports, maintenance flags, and
    listsinceblock."""
    def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
        """Return curr_balance after asserting the fee was in range"""
        fee = balance_with_fee - curr_balance
        target_fee = fee_per_byte * tx_size
        if fee < target_fee:
            raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
        # allow the node's estimation to be at most 2 bytes off
        if fee > fee_per_byte * (tx_size + 2):
            raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
        return curr_balance
    def __init__(self):
        # start from a clean chain; num_nodes is 4, but the fourth node
        # is only started partway through run_test
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
    def setup_network(self, split=False):
        # bring up only the first three nodes now (node 3 joins later
        # to test resendwallettransactions)
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.is_network_split=False
        self.sync_all()
    def run_test (self):
        # Check that there's no UTXO on none of the nodes
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        assert_equal(len(self.nodes[2].listunspent()), 0)
        print("Mining blocks...")
        self.nodes[0].generate(1)
        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 50)
        assert_equal(walletinfo['balance'], 0)
        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Check that only first and second nodes have UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 1)
        assert_equal(len(self.nodes[1].listunspent()), 1)
        assert_equal(len(self.nodes[2].listunspent()), 0)
        # Send 21 BTC from 0 to 2 using sendtoaddress call.
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 0)
        # Have node0 mine a block, thus it will collect its own fee.
        self.nodes[0].generate(1)
        self.sync_all()
        # Exercise locking of unspent outputs
        unspent_0 = self.nodes[2].listunspent()[0]
        unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
        self.nodes[2].lockunspent(False, [unspent_0])
        assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
        assert_equal([unspent_0], self.nodes[2].listlockunspent())
        self.nodes[2].lockunspent(True, [unspent_0])
        assert_equal(len(self.nodes[2].listlockunspent()), 0)
        # Have node1 generate 100 blocks (so node0 can recover the fee)
        self.nodes[1].generate(100)
        self.sync_all()
        # node0 should end up with 100 btc in block rewards plus fees, but
        # minus the 21 plus fees sent to node2
        assert_equal(self.nodes[0].getbalance(), 100-21)
        assert_equal(self.nodes[2].getbalance(), 21)
        # Node0 should have two unspent outputs.
        # Create a couple of transactions to send them to node2, submit them through
        # node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
        assert_equal(len(node0utxos), 2)
        # create both transactions
        txns_to_send = []
        for utxo in node0utxos:
            inputs = []
            outputs = {}
            inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
            outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3
            raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
        # Have node 1 (miner) send the transactions
        self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
        self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
        # Have node1 mine a block to confirm transactions:
        self.nodes[1].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 94)
        assert_equal(self.nodes[2].getbalance("from1"), 94-21)
        # Send 10 BTC normal
        address = self.nodes[0].getnewaddress("test")
        # 0.001 BTC/kB expressed per byte
        fee_per_byte = Decimal('0.001') / 1000
        self.nodes[2].settxfee(fee_per_byte * 1000)
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), Decimal('10'))
        # Send 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        # Sendmany 10 BTC
        txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
        self.nodes[2].generate(1)
        self.sync_all()
        node_0_bal += Decimal('10')
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), node_0_bal)
        # Sendmany 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        # Test ResendWalletTransactions:
        # Create a couple of transactions, then start up a fourth
        # node (nodes[3]) and ask nodes[0] to rebroadcast.
        # EXPECT: nodes[3] should have those transactions in its mempool.
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        sync_mempools(self.nodes)
        self.nodes.append(start_node(3, self.options.tmpdir))
        connect_nodes_bi(self.nodes, 0, 3)
        sync_blocks(self.nodes)
        relayed = self.nodes[0].resendwallettransactions()
        assert_equal(set(relayed), {txid1, txid2})
        sync_mempools(self.nodes)
        assert(txid1 in self.nodes[3].getrawmempool())
        # Exercise balance rpcs
        assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
        assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
        #check if we can list zero value tx as available coins
        #1. create rawtx
        #2. hex-changed one output to 0.0
        #3. sign and send
        #4. check if recipient (node0) can list the zero value tx
        usp = self.nodes[1].listunspent()
        inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
        outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
        decRawTx = self.nodes[1].decoderawtransaction(rawTx)
        signedRawTx = self.nodes[1].signrawtransaction(rawTx)
        decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
        zeroValueTxid= decRawTx['txid']
        sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
        self.sync_all()
        self.nodes[1].generate(1) #mine a block
        self.sync_all()
        unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
        found = False
        for uTx in unspentTxs:
            if uTx['txid'] == zeroValueTxid:
                found = True
                assert_equal(uTx['amount'], Decimal('0'))
        assert(found)
        #do some -walletbroadcast tests
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.sync_all()
        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        self.nodes[1].generate(1) #mine a block, tx should not be in there
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
        #now broadcast from another node, mine a block, sync, and check the balance
        self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        node_2_bal += 2
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        #create another tx
        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        #restart the nodes with -walletbroadcast=1
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        sync_blocks(self.nodes)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        node_2_bal += 2
        #tx should be added to balance because after restarting the nodes tx should be broadcastet
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        #send a tx with value in a string (PR#6380 +)
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-2'))
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))
        #check if JSON parser can handle scientific notation in strings
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))
        try:
            txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
        except JSONRPCException as e:
            assert("Invalid amount" in e.error['message'])
        else:
            raise AssertionError("Must not parse invalid amounts")
        try:
            self.nodes[0].generate("2")
            raise AssertionError("Must not accept strings as numeric")
        except JSONRPCException as e:
            assert("not an integer" in e.error['message'])
        # Import address and private key to check correct behavior of spendable unspents
        # 1. Send some coins to generate new UTXO
        address_to_import = self.nodes[2].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address_to_import, 1)
        self.nodes[0].generate(1)
        self.sync_all()
        # 2. Import address from node2 to node1
        self.nodes[1].importaddress(address_to_import)
        # 3. Validate that the imported address is watch-only on node1
        assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
        # 4. Check that the unspents after import are not spendable
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": False})
        # 5. Import private key of the previously imported address on node1
        priv_key = self.nodes[2].dumpprivkey(address_to_import)
        self.nodes[1].importprivkey(priv_key)
        # 6. Check that the unspents are now spendable on node1
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": True})
        # Mine a block from node0 to an address from node1
        cbAddr = self.nodes[1].getnewaddress()
        blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
        cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
        self.sync_all()
        # Check that the txid and balance is found by node1
        self.nodes[1].gettransaction(cbTxId)
        # check if wallet or blockchain maintenance changes the balance
        self.sync_all()
        blocks = self.nodes[0].generate(2)
        self.sync_all()
        balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
        block_count = self.nodes[0].getblockcount()
        maintenance = [
            '-rescan',
            '-reindex',
            '-zapwallettxes=1',
            '-zapwallettxes=2',
            # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
            # '-salvagewallet',
        ]
        for m in maintenance:
            print("check " + m)
            stop_nodes(self.nodes)
            wait_bitcoinds()
            self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
            while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
                # reindex will leave rpc warm up "early"; Wait for it to finish
                time.sleep(0.1)
            assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
        # Exercise listsinceblock with the last two blocks
        coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
        assert_equal(coinbase_tx_1["lastblock"], blocks[1])
        assert_equal(len(coinbase_tx_1["transactions"]), 1)
        assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
        assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
# run the functional test when invoked as a script
if __name__ == '__main__':
    WalletTest().main()
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.services.types import customer_customizer_service
from .base import CustomerCustomizerServiceTransport, DEFAULT_CLIENT_INFO
class CustomerCustomizerServiceGrpcTransport(
    CustomerCustomizerServiceTransport
):
    """gRPC backend transport for CustomerCustomizerService.
    Service to manage customer customizer
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
        """
        # may be replaced below depending on which channel branch runs
        self._ssl_channel_credentials = ssl_channel_credentials
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            # Deprecated mTLS path: derive host and SSL credentials from the
            # mTLS endpoint / client certificate callback.
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )
            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )
            if credentials is None:
                credentials, _ = google.auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            # Standard path: build a channel from the host and (possibly
            # default) credentials.
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # cache of lazily-created RPC stub callables, keyed by method name
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host, credentials=credentials, client_info=client_info,
        )
    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        scopes: Optional[Sequence[str]] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            address (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs,
        )
    def close(self):
        """Close the underlying gRPC channel."""
        self.grpc_channel.close()
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def mutate_customer_customizers(
        self,
    ) -> Callable[
        [customer_customizer_service.MutateCustomerCustomizersRequest],
        customer_customizer_service.MutateCustomerCustomizersResponse,
    ]:
        r"""Return a callable for the mutate customer customizers method over gRPC.
        Creates, updates or removes customer customizers.
        Operation statuses are returned.
        Returns:
            Callable[[~.MutateCustomerCustomizersRequest],
                    ~.MutateCustomerCustomizersResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "mutate_customer_customizers" not in self._stubs:
            self._stubs[
                "mutate_customer_customizers"
            ] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v9.services.CustomerCustomizerService/MutateCustomerCustomizers",
                request_serializer=customer_customizer_service.MutateCustomerCustomizersRequest.serialize,
                response_deserializer=customer_customizer_service.MutateCustomerCustomizersResponse.deserialize,
            )
        return self._stubs["mutate_customer_customizers"]
# explicit public API of this module
__all__ = ("CustomerCustomizerServiceGrpcTransport",)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.polling.async_base_polling import AsyncLROBasePolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SparkJobDefinitionOperations:
"""SparkJobDefinitionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.artifacts.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def get_spark_job_definitions_by_workspace(
        self,
        **kwargs
    ) -> AsyncIterable["models.SparkJobDefinitionsListResponse"]:
        """Lists spark job definitions.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SparkJobDefinitionsListResponse or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.synapse.artifacts.models.SparkJobDefinitionsListResponse]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkJobDefinitionsListResponse"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        def prepare_request(next_link=None):
            # Builds the HTTP request for either the first page (templated
            # URL + api-version query) or a continuation page (next_link).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            if not next_link:
                # Construct URL
                url = self.get_spark_job_definitions_by_workspace.metadata['url']  # type: ignore
                path_format_arguments = {
                    'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries its own query string
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                path_format_arguments = {
                    'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('SparkJobDefinitionsListResponse', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page through the pipeline, raising on non-200.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.CloudError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)
            return pipeline_response
        # AsyncItemPaged drives the closures above to iterate all pages.
        return AsyncItemPaged(
            get_next, extract_data
        )
    get_spark_job_definitions_by_workspace.metadata = {'url': '/sparkJobDefinitions'}  # type: ignore
    async def create_or_update_spark_job_definition(
        self,
        spark_job_definition_name: str,
        properties: "models.SparkJobDefinition",
        if_match: Optional[str] = None,
        **kwargs
    ) -> "models.SparkJobDefinitionResource":
        """Creates or updates a Spark Job Definition.
        :param spark_job_definition_name: The spark job definition name.
        :type spark_job_definition_name: str
        :param properties: Properties of spark job definition.
        :type properties: ~azure.synapse.artifacts.models.SparkJobDefinition
        :param if_match: ETag of the Spark Job Definition entity. Should only be specified for update,
         for which it should match existing entity or can be * for unconditional update.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SparkJobDefinitionResource, or the result of cls(response)
        :rtype: ~azure.synapse.artifacts.models.SparkJobDefinitionResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkJobDefinitionResource"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # wrap the bare properties into the resource envelope the API expects
        _spark_job_definition = models.SparkJobDefinitionResource(properties=properties)
        api_version = "2019-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self.create_or_update_spark_job_definition.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'sparkJobDefinitionName': self._serialize.url("spark_job_definition_name", spark_job_definition_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if if_match is not None:
            # conditional update: only apply when the server ETag matches
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_spark_job_definition, 'SparkJobDefinitionResource')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.CloudError, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('SparkJobDefinitionResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update_spark_job_definition.metadata = {'url': '/sparkJobDefinitions/{sparkJobDefinitionName}'}  # type: ignore
    async def get_spark_job_definition(
        self,
        spark_job_definition_name: str,
        if_none_match: Optional[str] = None,
        **kwargs
    ) -> Optional["models.SparkJobDefinitionResource"]:
        """Gets a Spark Job Definition.
        :param spark_job_definition_name: The spark job definition name.
        :type spark_job_definition_name: str
        :param if_none_match: ETag of the Spark Job Definition entity. Should only be specified for
         get. If the ETag matches the existing entity tag, or if * was provided, then no content will be
         returned.
        :type if_none_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SparkJobDefinitionResource, or the result of cls(response)
        :rtype: ~azure.synapse.artifacts.models.SparkJobDefinitionResource or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.SparkJobDefinitionResource"]]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        # Construct URL
        url = self.get_spark_job_definition.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'sparkJobDefinitionName': self._serialize.url("spark_job_definition_name", spark_job_definition_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if if_none_match is not None:
            # conditional get: server answers 304 when the ETag still matches
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        header_parameters['Accept'] = 'application/json'
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 304]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.CloudError, response)
            raise HttpResponseError(response=response, model=error)
        # 304 (Not Modified) carries no body, so the result stays None
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SparkJobDefinitionResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_spark_job_definition.metadata = {'url': '/sparkJobDefinitions/{sparkJobDefinitionName}'}  # type: ignore
    async def delete_spark_job_definition(
        self,
        spark_job_definition_name: str,
        **kwargs
    ) -> None:
        """Deletes a Spark Job Definition.

        :param spark_job_definition_name: The spark job definition name.
        :type spark_job_definition_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map 404/409 onto azure-core exception types; callers may extend via 'error_map'.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        # Construct URL from the operation metadata template.
        url = self.delete_spark_job_definition.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'sparkJobDefinitionName': self._serialize.url("spark_job_definition_name", spark_job_definition_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers (none beyond the defaults for DELETE)
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 and 204 are both success for delete; anything else is raised as an error.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.CloudError, response)
            raise HttpResponseError(response=response, model=error)
        if cls:
            return cls(pipeline_response, None, {})
    delete_spark_job_definition.metadata = {'url': '/sparkJobDefinitions/{sparkJobDefinitionName}'}  # type: ignore
    async def _execute_spark_job_definition_initial(
        self,
        spark_job_definition_name: str,
        **kwargs
    ) -> "models.SparkBatchJob":
        """Issue the initial POST of the execute-spark-job-definition long-running operation.

        Returns the ``SparkBatchJob`` deserialized from a 200 or 202 response;
        any other status code raises ``HttpResponseError``.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkBatchJob"]
        # Map 404/409 onto azure-core exception types; callers may extend via 'error_map'.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        # Construct URL from the operation metadata template.
        url = self._execute_spark_job_definition_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'sparkJobDefinitionName': self._serialize.url("spark_job_definition_name", spark_job_definition_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.CloudError, response)
            raise HttpResponseError(response=response, model=error)
        # Both accepted status codes carry a SparkBatchJob payload.
        if response.status_code == 200:
            deserialized = self._deserialize('SparkBatchJob', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('SparkBatchJob', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _execute_spark_job_definition_initial.metadata = {'url': '/sparkJobDefinitions/{sparkJobDefinitionName}/execute'}  # type: ignore
    async def begin_execute_spark_job_definition(
        self,
        spark_job_definition_name: str,
        **kwargs
    ) -> AsyncLROPoller["models.SparkBatchJob"]:
        """Executes the spark job definition.

        :param spark_job_definition_name: The spark job definition name.
        :type spark_job_definition_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either SparkBatchJob or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.synapse.artifacts.models.SparkBatchJob]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', False)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkBatchJob"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._execute_spark_job_definition_initial(
                spark_job_definition_name=spark_job_definition_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialization callback invoked by the poller on the final response.
            deserialized = self._deserialize('SparkBatchJob', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Select polling strategy: base LRO polling, no polling, or a caller-supplied method.
        if polling is True: polling_method = AsyncLROBasePolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_execute_spark_job_definition.metadata = {'url': '/sparkJobDefinitions/{sparkJobDefinitionName}/execute'}  # type: ignore
    async def _debug_spark_job_definition_initial(
        self,
        properties: "models.SparkJobDefinition",
        **kwargs
    ) -> "models.SparkBatchJob":
        """Issue the initial POST of the debug-spark-job-definition long-running operation.

        Wraps *properties* in a ``SparkJobDefinitionResource`` request body and
        returns the ``SparkBatchJob`` deserialized from a 200 or 202 response.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkBatchJob"]
        # Map 404/409 onto azure-core exception types; callers may extend via 'error_map'.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # The service expects the definition wrapped in a resource envelope.
        _spark_job_definition_azure_resource = models.SparkJobDefinitionResource(properties=properties)
        api_version = "2019-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL from the operation metadata template.
        url = self._debug_spark_job_definition_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Serialize the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_spark_job_definition_azure_resource, 'SparkJobDefinitionResource')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.CloudError, response)
            raise HttpResponseError(response=response, model=error)
        # Both accepted status codes carry a SparkBatchJob payload.
        if response.status_code == 200:
            deserialized = self._deserialize('SparkBatchJob', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('SparkBatchJob', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _debug_spark_job_definition_initial.metadata = {'url': '/debugSparkJobDefinition'}  # type: ignore
    async def begin_debug_spark_job_definition(
        self,
        properties: "models.SparkJobDefinition",
        **kwargs
    ) -> AsyncLROPoller["models.SparkBatchJob"]:
        """Debug the spark job definition.

        :param properties: Properties of spark job definition.
        :type properties: ~azure.synapse.artifacts.models.SparkJobDefinition
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either SparkBatchJob or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.synapse.artifacts.models.SparkBatchJob]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', False)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkBatchJob"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._debug_spark_job_definition_initial(
                properties=properties,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialization callback invoked by the poller on the final response.
            deserialized = self._deserialize('SparkBatchJob', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Select polling strategy: base LRO polling, no polling, or a caller-supplied method.
        if polling is True: polling_method = AsyncLROBasePolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_debug_spark_job_definition.metadata = {'url': '/debugSparkJobDefinition'}  # type: ignore
| |
"""
The `ModelSerializer` and `HyperlinkedModelSerializer` classes are essentially
shortcuts for automatically creating serializers based on a given model class.
These tests deal with ensuring that we correctly map the model fields onto
an appropriate set of serializer fields for each case.
"""
from __future__ import unicode_literals
import decimal
from collections import OrderedDict
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import (
MaxValueValidator, MinLengthValidator, MinValueValidator
)
from django.db import models
from django.db.models import DurationField as ModelDurationField
from django.test import TestCase
from django.utils import six
from rest_framework import serializers
from rest_framework.compat import unicode_repr
def dedent(blocktext):
    """Drop the first and last lines of *blocktext* and strip a fixed
    12-column indent from each remaining line."""
    inner = blocktext.splitlines()[1:-1]
    trimmed = []
    for raw_line in inner:
        trimmed.append(raw_line[12:])
    return '\n'.join(trimmed)
# Tests for regular field mappings.
# ---------------------------------
class CustomField(models.Field):
    """
    A custom model field simply for testing purposes.

    It deliberately adds no behavior, so the serializer layer must fall back
    to its generic ``ModelField`` mapping for it.
    """
    pass
class OneFieldModel(models.Model):
    """Minimal model with a single char field, used by ``TestModelSerializer``."""
    char_field = models.CharField(max_length=100)
class RegularFieldsModel(models.Model):
    """
    A model class for testing regular flat fields.

    One instance of (nearly) every flat Django model field type, so the
    field-mapping tests can assert the serializer field each one produces.
    """
    auto_field = models.AutoField(primary_key=True)
    big_integer_field = models.BigIntegerField()
    boolean_field = models.BooleanField(default=False)
    char_field = models.CharField(max_length=100)
    comma_separated_integer_field = models.CommaSeparatedIntegerField(max_length=100)
    date_field = models.DateField()
    datetime_field = models.DateTimeField()
    decimal_field = models.DecimalField(max_digits=3, decimal_places=1)
    email_field = models.EmailField(max_length=100)
    float_field = models.FloatField()
    integer_field = models.IntegerField()
    null_boolean_field = models.NullBooleanField()
    positive_integer_field = models.PositiveIntegerField()
    positive_small_integer_field = models.PositiveSmallIntegerField()
    slug_field = models.SlugField(max_length=100)
    small_integer_field = models.SmallIntegerField()
    text_field = models.TextField(max_length=100)
    time_field = models.TimeField()
    url_field = models.URLField(max_length=100)
    # Test-only field type; should map to a generic serializer ModelField.
    custom_field = CustomField()
    file_path_field = models.FilePathField(path='/tmp/')
    def method(self):
        # Plain method: listing it in Meta.fields should map to ReadOnlyField.
        return 'method'
# Choice tuples used by the option-mapping models below; DECIMAL_CHOICES uses
# non-string (Decimal) display values to exercise nonstandard choice arguments.
COLOR_CHOICES = (('red', 'Red'), ('blue', 'Blue'), ('green', 'Green'))
DECIMAL_CHOICES = (('low', decimal.Decimal('0.1')), ('medium', decimal.Decimal('0.5')), ('high', decimal.Decimal('0.9')))
class FieldOptionsModel(models.Model):
    """Model exercising per-field options (validators, blank, null, default,
    help_text/verbose_name, choices) for the option-mapping tests."""
    value_limit_field = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
    length_limit_field = models.CharField(validators=[MinLengthValidator(3)], max_length=12)
    blank_field = models.CharField(blank=True, max_length=10)
    null_field = models.IntegerField(null=True)
    default_field = models.IntegerField(default=0)
    descriptive_field = models.IntegerField(help_text='Some help text', verbose_name='A label')
    choices_field = models.CharField(max_length=100, choices=COLOR_CHOICES)
class ChoicesModel(models.Model):
    """Model whose choices use Decimal values plus extra field kwargs."""
    choices_field_with_nonstandard_args = models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES, verbose_name='A label')
class TestModelSerializer(TestCase):
    """Error-path behavior of ``ModelSerializer`` itself."""
    def test_create_method(self):
        """
        Saving with a declared field that is not a model field should surface
        a helpful TypeError from the default `.create()` implementation.
        """
        class TestSerializer(serializers.ModelSerializer):
            non_model_field = serializers.CharField()
            class Meta:
                model = OneFieldModel
                fields = ('char_field', 'non_model_field')
        serializer = TestSerializer(data={
            'char_field': 'foo',
            'non_model_field': 'bar',
        })
        serializer.is_valid()
        with self.assertRaises(TypeError) as excinfo:
            serializer.save()
        msginitial = 'Got a `TypeError` when calling `OneFieldModel.objects.create()`.'
        assert str(excinfo.exception).startswith(msginitial)
    def test_abstract_model(self):
        """
        Test that trying to use ModelSerializer with Abstract Models
        throws a ValueError exception.
        """
        class AbstractModel(models.Model):
            afield = models.CharField(max_length=255)
            class Meta:
                abstract = True
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = AbstractModel
                fields = ('afield',)
        serializer = TestSerializer(data={
            'afield': 'foo',
        })
        with self.assertRaises(ValueError) as excinfo:
            serializer.is_valid()
        msginitial = 'Cannot use ModelSerializer with Abstract Models.'
        assert str(excinfo.exception).startswith(msginitial)
class TestRegularFieldMappings(TestCase):
    """Mapping of flat (non-relational) model fields to serializer fields,
    asserted via the serializer's repr output."""
    def test_regular_fields(self):
        """
        Model fields should map to their equivalent serializer fields.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                auto_field = IntegerField(read_only=True)
                big_integer_field = IntegerField()
                boolean_field = BooleanField(required=False)
                char_field = CharField(max_length=100)
                comma_separated_integer_field = CharField(max_length=100, validators=[<django.core.validators.RegexValidator object>])
                date_field = DateField()
                datetime_field = DateTimeField()
                decimal_field = DecimalField(decimal_places=1, max_digits=3)
                email_field = EmailField(max_length=100)
                float_field = FloatField()
                integer_field = IntegerField()
                null_boolean_field = NullBooleanField(required=False)
                positive_integer_field = IntegerField()
                positive_small_integer_field = IntegerField()
                slug_field = SlugField(max_length=100)
                small_integer_field = IntegerField()
                text_field = CharField(max_length=100, style={'base_template': 'textarea.html'})
                time_field = TimeField()
                url_field = URLField(max_length=100)
                custom_field = ModelField(model_field=<tests.test_model_serializer.CustomField: custom_field>)
                file_path_field = FilePathField(path='/tmp/')
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_field_options(self):
        """
        Field options (validators, blank/null, defaults, help text, choices)
        should carry over to the generated serializer fields.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = FieldOptionsModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                value_limit_field = IntegerField(max_value=10, min_value=1)
                length_limit_field = CharField(max_length=12, min_length=3)
                blank_field = CharField(allow_blank=True, max_length=10, required=False)
                null_field = IntegerField(allow_null=True, required=False)
                default_field = IntegerField(required=False)
                descriptive_field = IntegerField(help_text='Some help text', label='A label')
                choices_field = ChoiceField(choices=(('red', 'Red'), ('blue', 'Blue'), ('green', 'Green')))
        """)
        if six.PY2:
            # This particular case is too awkward to resolve fully across
            # both py2 and py3.
            expected = expected.replace(
                "('red', 'Red'), ('blue', 'Blue'), ('green', 'Green')",
                "(u'red', u'Red'), (u'blue', u'Blue'), (u'green', u'Green')"
            )
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_method_field(self):
        """
        Properties and methods on the model should be allowed as `Meta.fields`
        values, and should map to `ReadOnlyField`.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field', 'method')
        expected = dedent("""
            TestSerializer():
                auto_field = IntegerField(read_only=True)
                method = ReadOnlyField()
        """)
        self.assertEqual(repr(TestSerializer()), expected)
    def test_pk_fields(self):
        """
        Both `pk` and the actual primary key name are valid in `Meta.fields`.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('pk', 'auto_field')
        expected = dedent("""
            TestSerializer():
                pk = IntegerField(label='Auto field', read_only=True)
                auto_field = IntegerField(read_only=True)
        """)
        self.assertEqual(repr(TestSerializer()), expected)
    def test_extra_field_kwargs(self):
        """
        Ensure `extra_kwargs` are passed to generated fields.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field', 'char_field')
                extra_kwargs = {'char_field': {'default': 'extra'}}
        expected = dedent("""
            TestSerializer():
                auto_field = IntegerField(read_only=True)
                char_field = CharField(default='extra', max_length=100)
        """)
        self.assertEqual(repr(TestSerializer()), expected)
    def test_extra_field_kwargs_required(self):
        """
        Ensure `extra_kwargs` are passed to generated fields.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field', 'char_field')
                extra_kwargs = {'auto_field': {'required': False, 'read_only': False}}
        expected = dedent("""
            TestSerializer():
                auto_field = IntegerField(read_only=False, required=False)
                char_field = CharField(max_length=100)
        """)
        self.assertEqual(repr(TestSerializer()), expected)
    def test_invalid_field(self):
        """
        Field names that do not map to a model field or relationship should
        raise a configuration error.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field', 'invalid')
        with self.assertRaises(ImproperlyConfigured) as excinfo:
            TestSerializer().fields
        expected = 'Field name `invalid` is not valid for model `RegularFieldsModel`.'
        assert str(excinfo.exception) == expected
    def test_missing_field(self):
        """
        Fields that have been declared on the serializer class must be included
        in the `Meta.fields` if it exists.
        """
        class TestSerializer(serializers.ModelSerializer):
            missing = serializers.ReadOnlyField()
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field',)
        with self.assertRaises(AssertionError) as excinfo:
            TestSerializer().fields
        expected = (
            "The field 'missing' was declared on serializer TestSerializer, "
            "but has not been included in the 'fields' option."
        )
        assert str(excinfo.exception) == expected
    def test_missing_superclass_field(self):
        """
        Fields that have been declared on a parent of the serializer class may
        be excluded from the `Meta.fields` option.
        """
        class TestSerializer(serializers.ModelSerializer):
            missing = serializers.ReadOnlyField()
            class Meta:
                model = RegularFieldsModel
                fields = '__all__'
        class ChildSerializer(TestSerializer):
            missing = serializers.ReadOnlyField()
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field',)
        # Accessing .fields should not raise for the child serializer.
        ChildSerializer().fields
    def test_choices_with_nonstandard_args(self):
        """Choices with Decimal values and extra kwargs should not break
        serializer construction."""
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = ChoicesModel
                fields = '__all__'
        ExampleSerializer()
    def test_fields_and_exclude_behavior(self):
        """`fields = '__all__'` should be equivalent whether implicit or explicit."""
        class ImplicitFieldsSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = '__all__'
        class ExplicitFieldsSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = '__all__'
        implicit = ImplicitFieldsSerializer()
        explicit = ExplicitFieldsSerializer()
        assert implicit.data == explicit.data
class TestDurationFieldMapping(TestCase):
    """Model ``DurationField`` should map to the serializer ``DurationField``."""
    def test_duration_field(self):
        class DurationFieldModel(models.Model):
            """
            A model that defines DurationField.
            """
            duration_field = ModelDurationField()
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DurationFieldModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                duration_field = DurationField()
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
class TestGenericIPAddressFieldValidation(TestCase):
    """``GenericIPAddressField`` should produce exactly one validation error
    for invalid input (not one per underlying validator)."""
    def test_ip_address_validation(self):
        class IPAddressFieldModel(models.Model):
            address = models.GenericIPAddressField()
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = IPAddressFieldModel
                fields = '__all__'
        s = TestSerializer(data={'address': 'not an ip address'})
        self.assertFalse(s.is_valid())
        self.assertEqual(1, len(s.errors['address']),
                         'Unexpected number of validation errors: '
                         '{0}'.format(s.errors))
# Tests for relational field mappings.
# ------------------------------------
class ForeignKeyTargetModel(models.Model):
    """Target of ``RelationalModel.foreign_key`` (reverse: ``reverse_foreign_key``)."""
    name = models.CharField(max_length=100)
class ManyToManyTargetModel(models.Model):
    """Target of ``RelationalModel.many_to_many`` (reverse: ``reverse_many_to_many``)."""
    name = models.CharField(max_length=100)
class OneToOneTargetModel(models.Model):
    """Target of ``RelationalModel.one_to_one`` (reverse: ``reverse_one_to_one``)."""
    name = models.CharField(max_length=100)
class ThroughTargetModel(models.Model):
    """Target of ``RelationalModel.through`` via the ``Supplementary`` through model."""
    name = models.CharField(max_length=100)
class Supplementary(models.Model):
    """Explicit through model joining ``RelationalModel`` and ``ThroughTargetModel``."""
    extra = models.IntegerField()
    forwards = models.ForeignKey('ThroughTargetModel', on_delete=models.CASCADE)
    backwards = models.ForeignKey('RelationalModel', on_delete=models.CASCADE)
class RelationalModel(models.Model):
    """Model with one of each relational field kind, for mapping tests."""
    foreign_key = models.ForeignKey(ForeignKeyTargetModel, related_name='reverse_foreign_key', on_delete=models.CASCADE)
    many_to_many = models.ManyToManyField(ManyToManyTargetModel, related_name='reverse_many_to_many')
    one_to_one = models.OneToOneField(OneToOneTargetModel, related_name='reverse_one_to_one', on_delete=models.CASCADE)
    # M2M with an explicit through model: serializers must treat it read-only.
    through = models.ManyToManyField(ThroughTargetModel, through=Supplementary, related_name='reverse_through')
class UniqueTogetherModel(models.Model):
    """Model with a ``unique_together`` constraint across two relations."""
    foreign_key = models.ForeignKey(ForeignKeyTargetModel, related_name='unique_foreign_key', on_delete=models.CASCADE)
    one_to_one = models.OneToOneField(OneToOneTargetModel, related_name='unique_one_to_one', on_delete=models.CASCADE)
    class Meta:
        unique_together = ("foreign_key", "one_to_one")
class TestRelationalFieldMappings(TestCase):
    """Mapping of relational model fields (FK, O2O, M2M, through, reverse)
    onto serializer fields, asserted via the serializer's repr output."""
    def test_pk_relations(self):
        """Forward relations should map to PrimaryKeyRelatedField by default."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                foreign_key = PrimaryKeyRelatedField(queryset=ForeignKeyTargetModel.objects.all())
                one_to_one = PrimaryKeyRelatedField(queryset=OneToOneTargetModel.objects.all(), validators=[<UniqueValidator(queryset=RelationalModel.objects.all())>])
                many_to_many = PrimaryKeyRelatedField(allow_empty=False, many=True, queryset=ManyToManyTargetModel.objects.all())
                through = PrimaryKeyRelatedField(many=True, read_only=True)
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_nested_relations(self):
        """With depth=1, relations should expand into nested serializers."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                depth = 1
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                foreign_key = NestedSerializer(read_only=True):
                    id = IntegerField(label='ID', read_only=True)
                    name = CharField(max_length=100)
                one_to_one = NestedSerializer(read_only=True):
                    id = IntegerField(label='ID', read_only=True)
                    name = CharField(max_length=100)
                many_to_many = NestedSerializer(many=True, read_only=True):
                    id = IntegerField(label='ID', read_only=True)
                    name = CharField(max_length=100)
                through = NestedSerializer(many=True, read_only=True):
                    id = IntegerField(label='ID', read_only=True)
                    name = CharField(max_length=100)
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_hyperlinked_relations(self):
        """HyperlinkedModelSerializer should map relations to hyperlinked fields."""
        class TestSerializer(serializers.HyperlinkedModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                url = HyperlinkedIdentityField(view_name='relationalmodel-detail')
                foreign_key = HyperlinkedRelatedField(queryset=ForeignKeyTargetModel.objects.all(), view_name='foreignkeytargetmodel-detail')
                one_to_one = HyperlinkedRelatedField(queryset=OneToOneTargetModel.objects.all(), validators=[<UniqueValidator(queryset=RelationalModel.objects.all())>], view_name='onetoonetargetmodel-detail')
                many_to_many = HyperlinkedRelatedField(allow_empty=False, many=True, queryset=ManyToManyTargetModel.objects.all(), view_name='manytomanytargetmodel-detail')
                through = HyperlinkedRelatedField(many=True, read_only=True, view_name='throughtargetmodel-detail')
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_nested_hyperlinked_relations(self):
        """depth=1 with hyperlinked serializers should nest and use url identity fields."""
        class TestSerializer(serializers.HyperlinkedModelSerializer):
            class Meta:
                model = RelationalModel
                depth = 1
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                url = HyperlinkedIdentityField(view_name='relationalmodel-detail')
                foreign_key = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='foreignkeytargetmodel-detail')
                    name = CharField(max_length=100)
                one_to_one = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='onetoonetargetmodel-detail')
                    name = CharField(max_length=100)
                many_to_many = NestedSerializer(many=True, read_only=True):
                    url = HyperlinkedIdentityField(view_name='manytomanytargetmodel-detail')
                    name = CharField(max_length=100)
                through = NestedSerializer(many=True, read_only=True):
                    url = HyperlinkedIdentityField(view_name='throughtargetmodel-detail')
                    name = CharField(max_length=100)
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_nested_unique_together_relations(self):
        """unique_together relations should still nest correctly at depth=1."""
        class TestSerializer(serializers.HyperlinkedModelSerializer):
            class Meta:
                model = UniqueTogetherModel
                depth = 1
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                url = HyperlinkedIdentityField(view_name='uniquetogethermodel-detail')
                foreign_key = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='foreignkeytargetmodel-detail')
                    name = CharField(max_length=100)
                one_to_one = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='onetoonetargetmodel-detail')
                    name = CharField(max_length=100)
        """)
        if six.PY2:
            # This case is also too awkward to resolve fully across both py2
            # and py3. (See above)
            expected = expected.replace(
                "('foreign_key', 'one_to_one')",
                "(u'foreign_key', u'one_to_one')"
            )
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_pk_reverse_foreign_key(self):
        """Reverse FK accessors listed in Meta.fields map to many=True PK fields."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = ForeignKeyTargetModel
                fields = ('id', 'name', 'reverse_foreign_key')
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                name = CharField(max_length=100)
                reverse_foreign_key = PrimaryKeyRelatedField(many=True, queryset=RelationalModel.objects.all())
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_pk_reverse_one_to_one(self):
        """Reverse one-to-one accessors map to a single PK field."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = OneToOneTargetModel
                fields = ('id', 'name', 'reverse_one_to_one')
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                name = CharField(max_length=100)
                reverse_one_to_one = PrimaryKeyRelatedField(queryset=RelationalModel.objects.all())
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_pk_reverse_many_to_many(self):
        """Reverse M2M accessors map to many=True PK fields."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = ManyToManyTargetModel
                fields = ('id', 'name', 'reverse_many_to_many')
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                name = CharField(max_length=100)
                reverse_many_to_many = PrimaryKeyRelatedField(many=True, queryset=RelationalModel.objects.all())
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_pk_reverse_through(self):
        """Reverse relations through an explicit through model are read-only."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = ThroughTargetModel
                fields = ('id', 'name', 'reverse_through')
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                name = CharField(max_length=100)
                reverse_through = PrimaryKeyRelatedField(many=True, read_only=True)
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
class DisplayValueTargetModel(models.Model):
    """Related model whose ``__str__`` supplies the default display value."""
    name = models.CharField(max_length=100)
    def __str__(self):
        return '%s Color' % (self.name)
class DisplayValueModel(models.Model):
    """Model holding the FK whose choices display values are under test."""
    color = models.ForeignKey(DisplayValueTargetModel, on_delete=models.CASCADE)
class TestRelationalFieldDisplayValue(TestCase):
    """Display values shown for related-field choices."""
    def setUp(self):
        # Three targets whose __str__ provides the default display value.
        DisplayValueTargetModel.objects.bulk_create([
            DisplayValueTargetModel(name='Red'),
            DisplayValueTargetModel(name='Yellow'),
            DisplayValueTargetModel(name='Green'),
        ])
    def test_default_display_value(self):
        """By default, choices use the related instance's __str__."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DisplayValueModel
                fields = '__all__'
        serializer = TestSerializer()
        expected = OrderedDict([(1, 'Red Color'), (2, 'Yellow Color'), (3, 'Green Color')])
        self.assertEqual(serializer.fields['color'].choices, expected)
    def test_custom_display_value(self):
        """Overriding display_value() customizes the choice labels."""
        class TestField(serializers.PrimaryKeyRelatedField):
            def display_value(self, instance):
                return 'My %s Color' % (instance.name)
        class TestSerializer(serializers.ModelSerializer):
            color = TestField(queryset=DisplayValueTargetModel.objects.all())
            class Meta:
                model = DisplayValueModel
                fields = '__all__'
        serializer = TestSerializer()
        expected = OrderedDict([(1, 'My Red Color'), (2, 'My Yellow Color'), (3, 'My Green Color')])
        self.assertEqual(serializer.fields['color'].choices, expected)
class TestIntegration(TestCase):
    """End-to-end checks that ModelSerializer handles FK, one-to-one and
    many-to-many relations for retrieval, create and update."""

    def setUp(self):
        # One target per relation kind, plus a RelationalModel tying them
        # together, shared by all three tests below.
        self.foreign_key_target = ForeignKeyTargetModel.objects.create(
            name='foreign_key'
        )
        self.one_to_one_target = OneToOneTargetModel.objects.create(
            name='one_to_one'
        )
        self.many_to_many_targets = [
            ManyToManyTargetModel.objects.create(
                name='many_to_many (%d)' % idx
            ) for idx in range(3)
        ]
        self.instance = RelationalModel.objects.create(
            foreign_key=self.foreign_key_target,
            one_to_one=self.one_to_one_target,
        )
        self.instance.many_to_many = self.many_to_many_targets
        self.instance.save()

    def test_pk_retrival(self):
        """Serializing an instance should expose relations as primary keys."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        serializer = TestSerializer(self.instance)
        expected = {
            'id': self.instance.pk,
            'foreign_key': self.foreign_key_target.pk,
            'one_to_one': self.one_to_one_target.pk,
            'many_to_many': [item.pk for item in self.many_to_many_targets],
            'through': []
        }
        self.assertEqual(serializer.data, expected)

    def test_pk_create(self):
        """Creating from pk data should set all relation attributes."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        new_foreign_key = ForeignKeyTargetModel.objects.create(
            name='foreign_key'
        )
        new_one_to_one = OneToOneTargetModel.objects.create(
            name='one_to_one'
        )
        new_many_to_many = [
            ManyToManyTargetModel.objects.create(
                name='new many_to_many (%d)' % idx
            ) for idx in range(3)
        ]
        data = {
            'foreign_key': new_foreign_key.pk,
            'one_to_one': new_one_to_one.pk,
            'many_to_many': [item.pk for item in new_many_to_many],
        }
        # Serializer should validate okay.
        serializer = TestSerializer(data=data)
        assert serializer.is_valid()
        # Creating the instance, relationship attributes should be set.
        instance = serializer.save()
        assert instance.foreign_key.pk == new_foreign_key.pk
        assert instance.one_to_one.pk == new_one_to_one.pk
        assert [
            item.pk for item in instance.many_to_many.all()
        ] == [
            item.pk for item in new_many_to_many
        ]
        assert list(instance.through.all()) == []
        # Representation should be correct.
        expected = {
            'id': instance.pk,
            'foreign_key': new_foreign_key.pk,
            'one_to_one': new_one_to_one.pk,
            'many_to_many': [item.pk for item in new_many_to_many],
            'through': []
        }
        self.assertEqual(serializer.data, expected)

    def test_pk_update(self):
        """Updating with pk data should repoint all relation attributes."""
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        new_foreign_key = ForeignKeyTargetModel.objects.create(
            name='foreign_key'
        )
        new_one_to_one = OneToOneTargetModel.objects.create(
            name='one_to_one'
        )
        new_many_to_many = [
            ManyToManyTargetModel.objects.create(
                name='new many_to_many (%d)' % idx
            ) for idx in range(3)
        ]
        data = {
            'foreign_key': new_foreign_key.pk,
            'one_to_one': new_one_to_one.pk,
            'many_to_many': [item.pk for item in new_many_to_many],
        }
        # Serializer should validate okay.
        serializer = TestSerializer(self.instance, data=data)
        assert serializer.is_valid()
        # Creating the instance, relationship attributes should be set.
        instance = serializer.save()
        assert instance.foreign_key.pk == new_foreign_key.pk
        assert instance.one_to_one.pk == new_one_to_one.pk
        assert [
            item.pk for item in instance.many_to_many.all()
        ] == [
            item.pk for item in new_many_to_many
        ]
        assert list(instance.through.all()) == []
        # Representation should be correct.
        expected = {
            'id': self.instance.pk,
            'foreign_key': new_foreign_key.pk,
            'one_to_one': new_one_to_one.pk,
            'many_to_many': [item.pk for item in new_many_to_many],
            'through': []
        }
        self.assertEqual(serializer.data, expected)
# Tests for bulk create using `ListSerializer`.
class BulkCreateModel(models.Model):
    # Minimal model used only by the bulk-create tests below.
    name = models.CharField(max_length=10)
class TestBulkCreate(TestCase):
    """Bulk creation through a ListSerializer wrapping a ModelSerializer."""

    def test_bulk_create(self):
        class BasicModelSerializer(serializers.ModelSerializer):
            class Meta:
                model = BulkCreateModel
                fields = ('name',)
        class BulkCreateSerializer(serializers.ListSerializer):
            child = BasicModelSerializer()
        data = [{'name': 'a'}, {'name': 'b'}, {'name': 'c'}]
        serializer = BulkCreateSerializer(data=data)
        assert serializer.is_valid()
        # Objects are returned by save().
        instances = serializer.save()
        assert len(instances) == 3
        assert [item.name for item in instances] == ['a', 'b', 'c']
        # Objects have been created in the database.
        assert BulkCreateModel.objects.count() == 3
        assert list(BulkCreateModel.objects.values_list('name', flat=True)) == ['a', 'b', 'c']
        # Serializer returns correct data.
        assert serializer.data == data
class MetaClassTestModel(models.Model):
    # Single-field model used by the Meta-option validation tests below.
    text = models.CharField(max_length=100)
class TestSerializerMetaClass(TestCase):
    """Invalid Meta options should raise descriptive errors lazily, when
    `.fields` is first accessed."""

    def test_meta_class_fields_option(self):
        """`fields` as a plain string (not list/tuple) raises TypeError."""
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = MetaClassTestModel
                fields = 'text'
        with self.assertRaises(TypeError) as result:
            ExampleSerializer().fields
        exception = result.exception
        assert str(exception).startswith(
            "The `fields` option must be a list or tuple"
        )

    def test_meta_class_exclude_option(self):
        """`exclude` as a plain string (not list/tuple) raises TypeError."""
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = MetaClassTestModel
                exclude = 'text'
        with self.assertRaises(TypeError) as result:
            ExampleSerializer().fields
        exception = result.exception
        assert str(exception).startswith(
            "The `exclude` option must be a list or tuple"
        )

    def test_meta_class_fields_and_exclude_options(self):
        """Setting both `fields` and `exclude` raises AssertionError."""
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = MetaClassTestModel
                fields = ('text',)
                exclude = ('text',)
        with self.assertRaises(AssertionError) as result:
            ExampleSerializer().fields
        exception = result.exception
        self.assertEqual(
            str(exception),
            "Cannot set both 'fields' and 'exclude' options on serializer ExampleSerializer."
        )
class Issue2704TestCase(TestCase):
    """Regression test: non-model attributes set on queryset items must
    survive serialization (issue #2704)."""

    def test_queryset_all(self):
        class TestSerializer(serializers.ModelSerializer):
            additional_attr = serializers.CharField()
            class Meta:
                model = OneFieldModel
                fields = ('char_field', 'additional_attr')
        OneFieldModel.objects.create(char_field='abc')
        qs = OneFieldModel.objects.all()
        # Attach an ad-hoc attribute to each already-fetched instance; the
        # serializer must read it from the same objects, not re-query.
        for o in qs:
            o.additional_attr = '123'
        serializer = TestSerializer(instance=qs, many=True)
        expected = [{
            'char_field': 'abc',
            'additional_attr': '123',
        }]
        assert serializer.data == expected
class DecimalFieldModel(models.Model):
    # Decimal field with explicit value-range validators, used to test how
    # model validators map onto serializer field arguments.
    decimal_field = models.DecimalField(
        max_digits=3,
        decimal_places=1,
        validators=[MinValueValidator(1), MaxValueValidator(3)]
    )
class TestDecimalFieldMappings(TestCase):
    """How DecimalField model validators map to serializer field kwargs."""

    def test_decimal_field_has_decimal_validator(self):
        """
        The serializer field should carry exactly the two value validators
        declared on the model; no extra `DecimalValidator` is added.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DecimalFieldModel
                fields = '__all__'
        serializer = TestSerializer()
        # Min + Max validators from the model declaration only.
        assert len(serializer.fields['decimal_field'].validators) == 2

    def test_min_value_is_passed(self):
        """
        Test that the `MinValueValidator` is converted to the `min_value`
        argument for the field.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DecimalFieldModel
                fields = '__all__'
        serializer = TestSerializer()
        assert serializer.fields['decimal_field'].min_value == 1

    def test_max_value_is_passed(self):
        """
        Test that the `MaxValueValidator` is converted to the `max_value`
        argument for the field.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DecimalFieldModel
                fields = '__all__'
        serializer = TestSerializer()
        assert serializer.fields['decimal_field'].max_value == 3
class TestMetaInheritance(TestCase):
    """Meta inheritance must not leak state between parent and child
    serializers (e.g. via a shared, mutated extra_kwargs dict)."""

    def test_extra_kwargs_not_altered(self):
        class TestSerializer(serializers.ModelSerializer):
            non_model_field = serializers.CharField()
            class Meta:
                model = OneFieldModel
                read_only_fields = ('char_field', 'non_model_field')
                fields = read_only_fields
                extra_kwargs = {}
        class ChildSerializer(TestSerializer):
            class Meta(TestSerializer.Meta):
                read_only_fields = ()
        test_expected = dedent("""
            TestSerializer():
                char_field = CharField(read_only=True)
                non_model_field = CharField()
        """)
        child_expected = dedent("""
            ChildSerializer():
                char_field = CharField(max_length=100)
                non_model_field = CharField()
        """)
        # Instantiate child, parent, then child again: the second child repr
        # must be unchanged, proving the parent did not mutate shared state.
        self.assertEqual(unicode_repr(ChildSerializer()), child_expected)
        self.assertEqual(unicode_repr(TestSerializer()), test_expected)
        self.assertEqual(unicode_repr(ChildSerializer()), child_expected)
class OneToOneTargetTestModel(models.Model):
    # Target side of the one-to-one primary-key relation tested below.
    text = models.CharField(max_length=100)
class OneToOneSourceTestModel(models.Model):
    # The one-to-one relation doubles as this model's primary key.
    target = models.OneToOneField(OneToOneTargetTestModel, primary_key=True)
class TestModelFieldValues(TestCase):
    """A one-to-one field used as primary key serializes to the target pk."""

    def test_model_field(self):
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = OneToOneSourceTestModel
                fields = ('target',)
        # Unsaved instances are fine: only attribute access is exercised.
        target = OneToOneTargetTestModel(id=1, text='abc')
        source = OneToOneSourceTestModel(target=target)
        serializer = ExampleSerializer(source)
        self.assertEqual(serializer.data, {'target': 1})
class TestUniquenessOverride(TestCase):
    """extra_kwargs must be able to override the `required=True` that
    unique_together would otherwise impose on a field."""

    def test_required_not_overwritten(self):
        class TestModel(models.Model):
            field_1 = models.IntegerField(null=True)
            field_2 = models.IntegerField()
            class Meta:
                unique_together = (('field_1', 'field_2'),)
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = TestModel
                extra_kwargs = {'field_1': {'required': False}}
        fields = TestSerializer().fields
        # field_1 keeps the explicit override; field_2 stays required.
        self.assertFalse(fields['field_1'].required)
        self.assertTrue(fields['field_2'].required)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''Implementation of SQLAlchemy backend.'''
from datetime import datetime
from datetime import timedelta
import sys
from oslo.config import cfg
from oslo.db.sqlalchemy import session as db_session
from oslo.db.sqlalchemy import utils
import sqlalchemy
from sqlalchemy import orm
from sqlalchemy.orm.session import Session
from heat.common import crypt
from heat.common import exception
from heat.common.i18n import _
from heat.db.sqlalchemy import filters as db_filters
from heat.db.sqlalchemy import migration
from heat.db.sqlalchemy import models
from heat.rpc import api as rpc_api
# Global oslo.config handle; max_events_per_stack is read by event_create()
# to decide when old events must be pruned.
CONF = cfg.CONF
CONF.import_opt('max_events_per_stack', 'heat.common.config')
# Lazily-initialised EngineFacade singleton (see get_facade()).
_facade = None
def get_facade():
    """Return the process-wide EngineFacade, creating it on first use."""
    global _facade
    if _facade is None:
        _facade = db_session.EngineFacade.from_config(CONF)
    return _facade
def get_engine():
    """Return the engine from the shared facade.

    Was a lambda assignment; PEP 8 (E731) prefers `def` for named callables.
    """
    return get_facade().get_engine()


def get_session():
    """Return a new session from the shared facade.

    Was a lambda assignment; PEP 8 (E731) prefers `def` for named callables.
    """
    return get_facade().get_session()
def get_backend():
    """The backend is this module itself."""
    backend = sys.modules[__name__]
    return backend
def model_query(context, *args):
    """Build a SQLAlchemy query on the context's (or default) session."""
    return _session(context).query(*args)
def soft_delete_aware_query(context, *args, **kwargs):
    """Stack query helper that accounts for context's `show_deleted` field.

    :param show_deleted: if True, overrides context's show_deleted field.
    """
    query = model_query(context, *args)
    if not (kwargs.get('show_deleted') or context.show_deleted):
        # Soft-deleted rows carry a deleted_at timestamp; live rows are NULL.
        query = query.filter_by(deleted_at=None)
    return query
def _session(context):
return (context and context.session) or get_session()
def raw_template_get(context, template_id):
    """Fetch a raw template row by primary key or raise NotFound."""
    template = model_query(context, models.RawTemplate).get(template_id)
    if template:
        return template
    raise exception.NotFound(_('raw template with id %s not found') %
                             template_id)
def raw_template_create(context, values):
    """Insert a new raw template row built from ``values``."""
    template = models.RawTemplate()
    template.update(values)
    template.save(_session(context))
    return template
def raw_template_update(context, template_id, values):
    """Update a raw template, writing only values that actually changed."""
    template = raw_template_get(context, template_id)
    # Drop key/value pairs that match the stored row to avoid no-op writes.
    changed = dict((k, v) for k, v in values.items()
                   if getattr(template, k) != v)
    if changed:
        template.update_and_save(changed)
    return template
def resource_get(context, resource_id):
    """Fetch a resource row by primary key or raise NotFound."""
    resource = model_query(context, models.Resource).get(resource_id)
    if resource:
        return resource
    raise exception.NotFound(_("resource with id %s not found") %
                             resource_id)
def resource_get_by_name_and_stack(context, resource_name, stack_id):
    """Return the named resource of a stack, with its data eagerly loaded."""
    query = (model_query(context, models.Resource)
             .filter_by(name=resource_name)
             .filter_by(stack_id=stack_id)
             .options(orm.joinedload("data")))
    return query.first()
def resource_get_by_physical_resource_id(context, physical_resource_id):
    """Find a resource by physical id, scoped to the caller's tenant.

    With no context every candidate matches; otherwise the resource's stack
    must belong to the caller's tenant or its stack-user project.
    """
    candidates = (model_query(context, models.Resource)
                  .filter_by(nova_instance=physical_resource_id)
                  .all())
    for candidate in candidates:
        if context is None:
            return candidate
        if context.tenant_id in (candidate.stack.tenant,
                                 candidate.stack.stack_user_project_id):
            return candidate
    return None
def resource_get_all(context):
    """Return every resource row, raising NotFound when there are none."""
    resources = model_query(context, models.Resource).all()
    if not resources:
        raise exception.NotFound(_('no resources were found'))
    return resources
def resource_data_get_all(resource, data=None):
    """
    Looks up resource_data by resource.id. If data is encrypted,
    this method will decrypt the results.
    """
    if data is None:
        data = (model_query(resource.context, models.ResourceData)
                .filter_by(resource_id=resource.id))
    if not data:
        # NOTE(review): a SQLAlchemy Query object is always truthy, so this
        # branch can only fire when an (empty) ``data`` sequence is passed in
        # explicitly -- confirm whether an empty query result should raise.
        raise exception.NotFound(_('no resource data found'))
    ret = {}
    for res in data:
        if res.redact:
            # Redacted rows store ciphertext plus the crypt method used.
            ret[res.key] = _decrypt(res.value, res.decrypt_method)
        else:
            ret[res.key] = res.value
    return ret
def resource_data_get(resource, key):
    """Lookup value of resource's data by key. Decrypts resource data if
    necessary.
    """
    item = resource_data_get_by_key(resource.context, resource.id, key)
    if item.redact:
        return _decrypt(item.value, item.decrypt_method)
    return item.value
def _encrypt(value):
    # Callers unpack the return as a (method, ciphertext) pair, so
    # crypt.encrypt() presumably returns a 2-tuple -- TODO(review) confirm.
    # None input yields (None, None).
    if value is not None:
        return crypt.encrypt(value.encode('utf-8'))
    else:
        return None, None
def _decrypt(enc_value, method):
    # ``method`` names the crypt.* routine that produced ``enc_value``.
    if method is None:
        return None
    decryptor = getattr(crypt, method)
    value = decryptor(enc_value)
    if value is not None:
        # Python 2 idiom: decode the decrypted bytes to a unicode string.
        return unicode(value, 'utf-8')
    # NOTE(review): implicitly returns None when decryption yields None.
def resource_data_get_by_key(context, resource_id, key):
    """Looks up resource_data by resource_id and key. Does not unencrypt
    resource_data.
    """
    row = (model_query(context, models.ResourceData)
           .filter_by(resource_id=resource_id)
           .filter_by(key=key)
           .first())
    if row is None:
        raise exception.NotFound(_('No resource data found'))
    return row
def resource_data_set(resource, key, value, redact=False):
    """Save resource's key/value pair to database."""
    if redact:
        # Replace the plaintext with ciphertext and remember the method.
        method, value = _encrypt(value)
    else:
        method = ''
    try:
        current = resource_data_get_by_key(resource.context, resource.id, key)
    except exception.NotFound:
        # First write for this key: start a fresh row.
        current = models.ResourceData()
        current.key = key
        current.resource_id = resource.id
    # Fields below are (re)set on both the new-row and update paths.
    current.redact = redact
    current.value = value
    current.decrypt_method = method
    current.save(session=resource.context.session)
    return current
def resource_exchange_stacks(context, resource_id1, resource_id2):
    """Swap the owning stacks of two resources in a single transaction."""
    query = model_query(context, models.Resource)
    session = query.session
    session.begin()
    res1 = query.get(resource_id1)
    res2 = query.get(resource_id2)
    # Tuple swap keeps both reassignments inside the same commit.
    res1.stack, res2.stack = res2.stack, res1.stack
    session.commit()
def resource_data_delete(resource, key):
    """Remove the stored data row for ``key`` on ``resource``."""
    row = resource_data_get_by_key(resource.context, resource.id, key)
    row.delete()
def resource_create(context, values):
    """Insert a new resource row built from ``values``."""
    resource = models.Resource()
    resource.update(values)
    resource.save(_session(context))
    return resource
def resource_get_all_by_stack(context, stack_id):
    """Return {name: resource} for every resource of a stack, data eagerly
    loaded; raise NotFound when the stack has none."""
    resources = (model_query(context, models.Resource)
                 .filter_by(stack_id=stack_id)
                 .options(orm.joinedload("data"))
                 .all())
    if not resources:
        raise exception.NotFound(_("no resources for stack_id %s were found")
                                 % stack_id)
    return dict((res.name, res) for res in resources)
def stack_get_by_name_and_owner_id(context, stack_name, owner_id):
    """First live stack matching name and owner, visible to the tenant."""
    tenant_visible = sqlalchemy.or_(
        models.Stack.tenant == context.tenant_id,
        models.Stack.stack_user_project_id == context.tenant_id
    )
    query = (soft_delete_aware_query(context, models.Stack)
             .filter(tenant_visible)
             .filter_by(name=stack_name)
             .filter_by(owner_id=owner_id))
    return query.first()
def stack_get_by_name(context, stack_name):
    """First live stack with the given name, visible to the tenant."""
    tenant_visible = sqlalchemy.or_(
        models.Stack.tenant == context.tenant_id,
        models.Stack.stack_user_project_id == context.tenant_id
    )
    query = (soft_delete_aware_query(context, models.Stack)
             .filter(tenant_visible)
             .filter_by(name=stack_name))
    return query.first()
def stack_get(context, stack_id, show_deleted=False, tenant_safe=True,
              eager_load=False):
    """Fetch one stack by id, honouring soft-delete and tenant scoping.

    Returns None (rather than raising) when the stack is missing, is
    soft-deleted without deletion being requested, or is outside the
    caller's tenant scope.
    """
    query = model_query(context, models.Stack)
    if eager_load:
        query = query.options(orm.joinedload("raw_template"))
    result = query.get(stack_id)
    deleted_ok = show_deleted or context.show_deleted
    # Precedence: reads as ``result is None or (deleted and not deleted_ok)``.
    if result is None or result.deleted_at is not None and not deleted_ok:
        return None
    # One exception to normal project scoping is users created by the
    # stacks in the stack_user_project_id (in the heat stack user domain)
    if (tenant_safe and result is not None and context is not None and
            context.tenant_id not in (result.tenant,
                                      result.stack_user_project_id)):
        return None
    return result
def stack_get_all_by_owner_id(context, owner_id):
    """Return every non-deleted stack owned by ``owner_id``."""
    query = soft_delete_aware_query(context, models.Stack)
    return query.filter_by(owner_id=owner_id).all()
def _get_sort_keys(sort_keys, mapping):
    '''Returns an array containing only whitelisted keys

    :param sort_keys: an array of strings (or a single string)
    :param mapping: a mapping from keys to DB column names
    :returns: filtered list of sort keys
    '''
    if isinstance(sort_keys, basestring):
        sort_keys = [sort_keys]
    columns = []
    for key in sort_keys or []:
        if key in mapping:
            columns.append(mapping[key])
    return columns
def _paginate_query(context, query, model, limit=None, sort_keys=None,
                    marker=None, sort_dir=None):
    """Apply sorting and marker-based pagination to ``query``.

    Defaults to newest-first on ``created_at``.
    """
    default_sort_keys = ['created_at']
    if not sort_keys:
        sort_keys = default_sort_keys
    if not sort_dir:
        sort_dir = 'desc'
    # This assures the order of the stacks will always be the same
    # even for sort_key values that are not unique in the database
    sort_keys = sort_keys + ['id']
    model_marker = None
    if marker:
        # The marker is the primary key of the last row of the previous page.
        model_marker = model_query(context, model).get(marker)
    try:
        query = utils.paginate_query(query, model, limit, sort_keys,
                                     model_marker, sort_dir)
    except utils.InvalidSortKey as exc:
        # exc.message is a Python 2 idiom; removed in Python 3.
        raise exception.Invalid(reason=exc.message)
    return query
def _query_stack_get_all(context, tenant_safe=True, show_deleted=False,
                         show_nested=False):
    """Base stack-listing query.

    Nested view includes child stacks but hides backups; the flat view
    shows only top-level stacks (owner_id is NULL).
    """
    query = soft_delete_aware_query(context, models.Stack,
                                    show_deleted=show_deleted)
    if show_nested:
        query = query.filter_by(backup=False)
    else:
        query = query.filter_by(owner_id=None)
    if tenant_safe:
        query = query.filter_by(tenant=context.tenant_id)
    return query
def stack_get_all(context, limit=None, sort_keys=None, marker=None,
                  sort_dir=None, filters=None, tenant_safe=True,
                  show_deleted=False, show_nested=False):
    """List stacks with filtering, sorting and pagination applied."""
    base = _query_stack_get_all(context, tenant_safe,
                                show_deleted=show_deleted,
                                show_nested=show_nested)
    paged = _filter_and_page_query(context, base, limit, sort_keys,
                                   marker, sort_dir, filters)
    return paged.all()
def _filter_and_page_query(context, query, limit=None, sort_keys=None,
                           marker=None, sort_dir=None, filters=None):
    """Apply exact-match filters, then sort/paginate a stack query."""
    if filters is None:
        filters = {}
    # Public API sort names -> Stack model column names.
    sort_key_map = {
        rpc_api.STACK_NAME: models.Stack.name.key,
        rpc_api.STACK_STATUS: models.Stack.status.key,
        rpc_api.STACK_CREATION_TIME: models.Stack.created_at.key,
        rpc_api.STACK_UPDATED_TIME: models.Stack.updated_at.key,
    }
    allowed_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
    filtered = db_filters.exact_filter(query, models.Stack, filters)
    return _paginate_query(context, filtered, models.Stack, limit,
                           allowed_sort_keys, marker, sort_dir)
def stack_count_all(context, filters=None, tenant_safe=True,
                    show_deleted=False, show_nested=False):
    """Count the stacks that stack_get_all() would return for ``filters``."""
    query = _query_stack_get_all(context, tenant_safe=tenant_safe,
                                 show_deleted=show_deleted,
                                 show_nested=show_nested)
    filtered = db_filters.exact_filter(query, models.Stack, filters)
    return filtered.count()
def stack_create(context, values):
    """Insert a new stack row built from ``values``."""
    stack = models.Stack()
    stack.update(values)
    stack.save(_session(context))
    return stack
def stack_update(context, stack_id, values):
    """Apply ``values`` to an existing stack or raise NotFound."""
    stack = stack_get(context, stack_id)
    if stack is None:
        raise exception.NotFound(_('Attempt to update a stack with id: '
                                   '%(id)s %(msg)s') % {
                                       'id': stack_id,
                                       'msg': 'that does not exist'})
    stack.update(values)
    stack.save(_session(context))
def stack_delete(context, stack_id):
    """Soft-delete a stack after hard-deleting its resources."""
    s = stack_get(context, stack_id)
    if not s:
        raise exception.NotFound(_('Attempt to delete a stack with id: '
                                   '%(id)s %(msg)s') % {
                                       'id': stack_id,
                                       'msg': 'that does not exist'})
    session = Session.object_session(s)
    # Resources are removed outright; the stack row itself is only
    # soft-deleted (deleted_at set) so it remains queryable as history.
    for r in s.resources:
        session.delete(r)
    s.soft_delete(session=session)
    session.flush()
def stack_lock_create(stack_id, engine_id):
    """Try to acquire the lock on a stack for ``engine_id``.

    Returns None on success, or the holder's engine id if already locked.
    """
    session = get_session()
    with session.begin():
        lock = session.query(models.StackLock).get(stack_id)
        if lock is not None:
            return lock.engine_id
        session.add(models.StackLock(stack_id=stack_id, engine_id=engine_id))
def stack_lock_steal(stack_id, old_engine_id, new_engine_id):
    """Atomically reassign a stack lock from one engine to another.

    Returns None on success; otherwise the current holder's engine id, or
    True when no lock row exists at all.
    """
    session = get_session()
    with session.begin():
        lock = session.query(models.StackLock).get(stack_id)
        # The UPDATE only matches while old_engine_id still holds the lock,
        # making the steal safe against concurrent lock changes.
        rows_affected = session.query(models.StackLock).\
            filter_by(stack_id=stack_id, engine_id=old_engine_id).\
            update({"engine_id": new_engine_id})
    if not rows_affected:
        return lock.engine_id if lock is not None else True
def stack_lock_release(stack_id, engine_id):
    """Release the stack lock if held by ``engine_id``.

    Returns True when nothing was released (lock absent or held elsewhere).
    """
    session = get_session()
    with session.begin():
        deleted = (session.query(models.StackLock)
                   .filter_by(stack_id=stack_id, engine_id=engine_id)
                   .delete())
    if not deleted:
        return True
def user_creds_create(context):
    """Persist the context's credentials, encrypting secrets at rest."""
    values = context.to_dict()
    user_creds_ref = models.UserCreds()
    if values.get('trust_id'):
        # Trust-scoped credentials: store the (encrypted) trust id and
        # deliberately blank out username/password.
        method, trust_id = _encrypt(values.get('trust_id'))
        user_creds_ref.trust_id = trust_id
        user_creds_ref.decrypt_method = method
        user_creds_ref.trustor_user_id = values.get('trustor_user_id')
        user_creds_ref.username = None
        user_creds_ref.password = None
        user_creds_ref.tenant = values.get('tenant')
        user_creds_ref.tenant_id = values.get('tenant_id')
    else:
        # Password-based credentials: copy everything, then overwrite the
        # plaintext password with its encrypted form.
        user_creds_ref.update(values)
        method, password = _encrypt(values['password'])
        user_creds_ref.password = password
        user_creds_ref.decrypt_method = method
    user_creds_ref.save(_session(context))
    return user_creds_ref
def user_creds_get(user_creds_id):
    """Return stored credentials as a decrypted dict, or None when absent."""
    db_result = model_query(None, models.UserCreds).get(user_creds_id)
    if db_result is None:
        return None
    # Return a dict copy of db results, do not decrypt details into db_result
    # or it can be committed back to the DB in decrypted form
    result = dict(db_result)
    del result['decrypt_method']
    result['password'] = _decrypt(result['password'], db_result.decrypt_method)
    result['trust_id'] = _decrypt(result['trust_id'], db_result.decrypt_method)
    return result
def user_creds_delete(context, user_creds_id):
    """Hard-delete a stored credentials row or raise NotFound."""
    creds = model_query(context, models.UserCreds).get(user_creds_id)
    if not creds:
        raise exception.NotFound(
            _('Attempt to delete user creds with id '
              '%(id)s that does not exist') % {'id': user_creds_id})
    session = Session.object_session(creds)
    session.delete(creds)
    session.flush()
def event_get(context, event_id):
    """Fetch a single event by primary key (None when absent)."""
    return model_query(context, models.Event).get(event_id)
def event_get_all(context):
    """Return events belonging to every stack visible to the context."""
    stacks = soft_delete_aware_query(context, models.Stack)
    visible_stack_ids = [stack.id for stack in stacks]
    query = model_query(context, models.Event).filter(
        models.Event.stack_id.in_(visible_stack_ids))
    return query.all()
def event_get_all_by_tenant(context, limit=None, marker=None,
                            sort_keys=None, sort_dir=None, filters=None):
    """List the tenant's events with paging/sorting applied."""
    query = model_query(context, models.Event)
    query = db_filters.exact_filter(query, models.Event, filters)
    query = query.join(models.Event.stack).\
        filter_by(tenant=context.tenant_id).filter_by(deleted_at=None)
    # Filters were already applied above; cleared so the paging helper does
    # not apply them a second time.
    filters = None
    return _events_filter_and_page_query(context, query, limit, marker,
                                         sort_keys, sort_dir, filters).all()
def _query_all_by_stack(context, stack_id):
    """Base query for all events belonging to one stack."""
    return model_query(context, models.Event).filter_by(stack_id=stack_id)
def event_get_all_by_stack(context, stack_id, limit=None, marker=None,
                           sort_keys=None, sort_dir=None, filters=None):
    """List one stack's events with paging/sorting applied."""
    base = _query_all_by_stack(context, stack_id)
    paged = _events_filter_and_page_query(context, base, limit, marker,
                                          sort_keys, sort_dir, filters)
    return paged.all()
def _events_paginate_query(context, query, model, limit=None, sort_keys=None,
                           marker=None, sort_dir=None):
    """Sort/paginate an event query; the marker is the event's uuid."""
    default_sort_keys = ['created_at']
    if not sort_keys:
        sort_keys = default_sort_keys
    if not sort_dir:
        sort_dir = 'desc'
    # This assures the order of the stacks will always be the same
    # even for sort_key values that are not unique in the database
    sort_keys = sort_keys + ['id']
    model_marker = None
    if marker:
        # not to use model_query(context, model).get(marker), because
        # user can only see the ID(column 'uuid') and the ID as the marker
        model_marker = model_query(context, model).filter_by(uuid=marker).\
            first()
    try:
        query = utils.paginate_query(query, model, limit, sort_keys,
                                     model_marker, sort_dir)
    except utils.InvalidSortKey as exc:
        # exc.message is a Python 2 idiom; removed in Python 3.
        raise exception.Invalid(reason=exc.message)
    return query
def _events_filter_and_page_query(context, query,
                                  limit=None, marker=None,
                                  sort_keys=None, sort_dir=None,
                                  filters=None):
    """Apply exact-match filters, then sort/paginate an event query."""
    if filters is None:
        filters = {}
    # Public API sort names -> Event model column names.
    sort_key_map = {
        rpc_api.EVENT_TIMESTAMP: models.Event.created_at.key,
        rpc_api.EVENT_RES_TYPE: models.Event.resource_type.key,
    }
    allowed_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
    filtered = db_filters.exact_filter(query, models.Event, filters)
    return _events_paginate_query(context, filtered, models.Event, limit,
                                  allowed_sort_keys, marker, sort_dir)
def event_count_all_by_stack(context, stack_id):
    """Count the events recorded against one stack."""
    query = _query_all_by_stack(context, stack_id)
    return query.count()
def _delete_event_rows(context, stack_id, limit):
    """Delete the ``limit`` oldest events of a stack; returns rows deleted."""
    # MySQL does not support LIMIT in subqueries,
    # sqlite does not support JOIN in DELETE.
    # So we must manually supply the IN() values.
    # pgsql SHOULD work with the pure DELETE/JOIN below but that must be
    # confirmed via integration tests.
    query = _query_all_by_stack(context, stack_id)
    session = _session(context)
    # Oldest first: ascending id order approximates insertion order.
    ids = [r.id for r in query.order_by(
        models.Event.id).limit(limit).all()]
    q = session.query(models.Event).filter(
        models.Event.id.in_(ids))
    return q.delete(synchronize_session='fetch')
def event_create(context, values):
    """Store a new event, pruning the oldest ones past the per-stack cap."""
    if 'stack_id' in values and cfg.CONF.max_events_per_stack:
        if ((event_count_all_by_stack(context, values['stack_id']) >=
             cfg.CONF.max_events_per_stack)):
            # prune
            _delete_event_rows(
                context, values['stack_id'], cfg.CONF.event_purge_batch_size)
    event_ref = models.Event()
    event_ref.update(values)
    event_ref.save(_session(context))
    return event_ref
def watch_rule_get(context, watch_rule_id):
    """Fetch a watch rule by primary key (None when absent)."""
    return model_query(context, models.WatchRule).get(watch_rule_id)
def watch_rule_get_by_name(context, watch_rule_name):
    """Fetch the first watch rule with the given name (None when absent)."""
    query = model_query(context, models.WatchRule)
    return query.filter_by(name=watch_rule_name).first()
def watch_rule_get_all(context):
    """Return every watch rule row."""
    return model_query(context, models.WatchRule).all()
def watch_rule_get_all_by_stack(context, stack_id):
    """Return every watch rule belonging to one stack."""
    query = model_query(context, models.WatchRule)
    return query.filter_by(stack_id=stack_id).all()
def watch_rule_create(context, values):
    """Insert a new watch rule row built from ``values``."""
    rule = models.WatchRule()
    rule.update(values)
    rule.save(_session(context))
    return rule
def watch_rule_update(context, watch_id, values):
    """Apply ``values`` to an existing watch rule or raise NotFound."""
    rule = watch_rule_get(context, watch_id)
    if rule is None:
        raise exception.NotFound(_('Attempt to update a watch with id: '
                                   '%(id)s %(msg)s') % {
                                       'id': watch_id,
                                       'msg': 'that does not exist'})
    rule.update(values)
    rule.save(_session(context))
def watch_rule_delete(context, watch_id):
    """Delete a watch rule together with its accumulated watch data."""
    wr = watch_rule_get(context, watch_id)
    if not wr:
        raise exception.NotFound(_('Attempt to delete watch_rule: '
                                   '%(id)s %(msg)s') % {
                                       'id': watch_id,
                                       'msg': 'that does not exist'})
    session = Session.object_session(wr)
    # Child watch_data rows first, then the rule itself.
    for d in wr.watch_data:
        session.delete(d)
    session.delete(wr)
    session.flush()
def watch_data_create(context, values):
    """Insert a new watch data row built from ``values``."""
    datum = models.WatchData()
    datum.update(values)
    datum.save(_session(context))
    return datum
def watch_data_get_all(context):
    """Return every watch data row."""
    return model_query(context, models.WatchData).all()
def software_config_create(context, values):
    """Insert a new software config row built from ``values``."""
    config = models.SoftwareConfig()
    config.update(values)
    config.save(_session(context))
    return config
def software_config_get(context, config_id):
    """Fetch a software config scoped to the caller's tenant, or raise."""
    config = model_query(context, models.SoftwareConfig).get(config_id)
    if (config is not None and context is not None and
            config.tenant != context.tenant_id):
        # Other tenants' configs are reported as missing, not forbidden.
        config = None
    if config is None:
        raise exception.NotFound(_('Software config with id %s not found') %
                                 config_id)
    return config
def software_config_delete(context, config_id):
    """Hard-delete a software config (NotFound propagates from the get)."""
    config = software_config_get(context, config_id)
    session = Session.object_session(config)
    session.delete(config)
    session.flush()
def software_deployment_create(context, values):
    """Insert a new software deployment row built from ``values``."""
    deployment = models.SoftwareDeployment()
    deployment.update(values)
    deployment.save(_session(context))
    return deployment
def software_deployment_get(context, deployment_id):
    """Fetch a deployment visible to the caller's tenant, or raise.

    Visibility covers the owning tenant and the stack-user project.
    """
    deployment = model_query(
        context, models.SoftwareDeployment).get(deployment_id)
    if (deployment is not None and context is not None and
            context.tenant_id not in (deployment.tenant,
                                      deployment.stack_user_project_id)):
        deployment = None
    if deployment is None:
        raise exception.NotFound(_('Deployment with id %s not found') %
                                 deployment_id)
    return deployment
def software_deployment_get_all(context, server_id=None):
    """List deployments visible to the tenant, oldest first, optionally
    restricted to one server."""
    sd = models.SoftwareDeployment
    visible = sqlalchemy.or_(
        sd.tenant == context.tenant_id,
        sd.stack_user_project_id == context.tenant_id
    )
    query = (model_query(context, sd)
             .filter(visible)
             .order_by(sd.created_at))
    if server_id:
        query = query.filter_by(server_id=server_id)
    return query.all()
def software_deployment_update(context, deployment_id, values):
    """Apply ``values`` to an existing deployment and return it."""
    deployment = software_deployment_get(context, deployment_id)
    deployment.update(values)
    deployment.save(_session(context))
    return deployment
def software_deployment_delete(context, deployment_id):
    """Hard-delete a deployment (NotFound propagates from the get)."""
    deployment = software_deployment_get(context, deployment_id)
    session = Session.object_session(deployment)
    session.delete(deployment)
    session.flush()
def snapshot_create(context, values):
    """Insert a new snapshot row built from ``values``."""
    snapshot = models.Snapshot()
    snapshot.update(values)
    snapshot.save(_session(context))
    return snapshot
def snapshot_get(context, snapshot_id):
    """Fetch a snapshot scoped to the caller's tenant, or raise."""
    snapshot = model_query(context, models.Snapshot).get(snapshot_id)
    if (snapshot is not None and context is not None and
            context.tenant_id != snapshot.tenant):
        # Other tenants' snapshots are reported as missing, not forbidden.
        snapshot = None
    if snapshot is None:
        raise exception.NotFound(_('Snapshot with id %s not found') %
                                 snapshot_id)
    return snapshot
def snapshot_update(context, snapshot_id, values):
    """Apply ``values`` to an existing snapshot and return it."""
    snapshot = snapshot_get(context, snapshot_id)
    snapshot.update(values)
    snapshot.save(_session(context))
    return snapshot
def snapshot_delete(context, snapshot_id):
    """Hard-delete a snapshot (NotFound propagates from the get)."""
    snapshot = snapshot_get(context, snapshot_id)
    session = Session.object_session(snapshot)
    session.delete(snapshot)
    session.flush()
def snapshot_get_all(context, stack_id):
    # Returns the query object itself (not .all()); callers iterate it.
    return model_query(context, models.Snapshot).filter_by(
        stack_id=stack_id, tenant=context.tenant_id)
def purge_deleted(age, granularity='days'):
    """Hard-delete rows for stacks soft-deleted longer than ``age`` ago.

    :param age: non-negative integer count of ``granularity`` units.
    :param granularity: one of 'days', 'hours', 'minutes', 'seconds'.
    :raises exception.Error: on invalid age or granularity.
    """
    try:
        age = int(age)
    except ValueError:
        raise exception.Error(_("age should be an integer"))
    if age < 0:
        raise exception.Error(_("age should be a positive integer"))
    if granularity not in ('days', 'hours', 'minutes', 'seconds'):
        raise exception.Error(
            _("granularity should be days, hours, minutes, or seconds"))
    # Normalise age to seconds ('seconds' needs no scaling).
    if granularity == 'days':
        age = age * 86400
    elif granularity == 'hours':
        age = age * 3600
    elif granularity == 'minutes':
        age = age * 60
    # NOTE(review): datetime.now() is local time; if deleted_at is stored in
    # UTC this cutoff is skewed by the server's UTC offset -- confirm.
    time_line = datetime.now() - timedelta(seconds=age)
    engine = get_engine()
    meta = sqlalchemy.MetaData()
    meta.bind = engine
    # Reflect the live tables so this works across schema versions.
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    event = sqlalchemy.Table('event', meta, autoload=True)
    raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
    user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
    stmt = sqlalchemy.select([stack.c.id,
                              stack.c.raw_template_id,
                              stack.c.user_creds_id]).\
        where(stack.c.deleted_at < time_line)
    deleted_stacks = engine.execute(stmt)
    for s in deleted_stacks:
        # Dependents first (events), then the stack and its 1:1 rows.
        event_del = event.delete().where(event.c.stack_id == s[0])
        engine.execute(event_del)
        stack_del = stack.delete().where(stack.c.id == s[0])
        engine.execute(stack_del)
        raw_template_del = raw_template.delete().\
            where(raw_template.c.id == s[1])
        engine.execute(raw_template_del)
        user_creds_del = user_creds.delete().where(user_creds.c.id == s[2])
        engine.execute(user_creds_del)
def db_sync(engine, version=None):
    """Migrate the database to `version` (or the most recent version when
    None); thin wrapper around the migration module."""
    return migration.db_sync(engine, version=version)
def db_version(engine):
    """Display the current database version; thin wrapper around the
    migration module."""
    return migration.db_version(engine)
'''
Matches files belonging together.
Example match for shapefile: file1.shp, file1.shx, file1.dbf
'''
import os
from collections import namedtuple
from OGRgeoConverter.models import OgrFormat, AdditionalOgrFormat
from OGRgeoConverter.filesystem import archives
class FileMatcher:
    '''
    Saves a list of files by an ID and returns files being matched
    '''

    def __init__(self, file_dict):
        self.__file_dict = file_dict
        self.__file_matches = _get_matches_from_file_list(self.__file_dict)

    def get_file_name(self, file_id):
        # Prefer the (possibly renamed) name recorded inside a match; fall
        # back to the originally registered name.
        match = self.get_match(file_id)
        if match is None:
            return self.__file_dict[file_id]
        return match.get_file_dict()[file_id]

    def get_original_file_name(self, file_id):
        # Always the name as originally registered, ignoring renames.
        return self.__file_dict[file_id]

    def get_match(self, file_id):
        # Linear scan; the number of matches per upload is small.
        for match in self.__file_matches:
            if file_id in match.get_file_dict():
                return match
        return None

    def get_matches(self):
        return self.__file_matches
class FileMatch:
    '''
    A single group of files that belong together (e.g. file1.shp,
    file1.shx, file1.dbf), together with the OGR format name they were
    matched against and flags for archive / validity status.
    '''
    def __init__(self, file_dict, ogr_format_name, is_archive, is_valid):
        self.__file_dict = file_dict
        self.__ogr_format_name = ogr_format_name
        self.__is_archive = is_archive
        self.__is_valid = is_valid
    def get_file_dict(self):
        # Mapping of file ID -> current (possibly renamed) file name.
        return self.__file_dict
    def get_files(self):
        # File names only, in insertion order.
        return [*self.__file_dict.values()]
    def get_ogr_format_name(self):
        return self.__ogr_format_name
    def is_archive(self):
        return self.__is_archive
    def is_valid(self):
        return self.__is_valid
    def rename_file(self, file_id, new_name):
        # Unknown IDs are ignored on purpose; only existing entries change.
        if file_id not in self.__file_dict:
            return
        self.__file_dict[file_id] = new_name
def _get_matches_from_file_list(file_dict):
    '''
    Does the actual file matching based on the format information set in the Django admin.
    Takes a file dictionary as argument and returns a list of FileMatch objects.
    '''
    if len(file_dict) == 0:
        return []
    else:
        formats = OgrFormat.get_formats_information_dictionary()
        matches = []
        # Classify every uploaded file into one of four buckets first.
        ogr_format_files, additional_format_files, archive_formats, unknown_format_files = _get_extended_file_lists(
            file_dict)
        # Parallel flag list: marks companion files already claimed by a
        # match so the same file is never attached to two matches.
        additional_format_in_use = [False] * len(additional_format_files)
        for file_info in ogr_format_files:
            is_valid = True
            matched_files_dict = {file_info.file_id: file_info.full_name}
            # Pull in every companion file (same base name, expected
            # extension) declared for this format in the admin.
            for additional_format in formats[
                    file_info.format_name].additional_files:
                # limit_reached stops after the first hit unless the
                # format allows multiple files of this extension.
                limit_reached = False
                for i in range(len(additional_format_files)):
                    if not limit_reached and additional_format_files[i].file_extension.lower() == additional_format.file_extension.lower(
                    ) and additional_format_files[i].file_name == file_info.file_name and not additional_format_in_use[i]:
                        matched_files_dict[additional_format_files[
                            i].file_id] = additional_format_files[i].full_name
                        additional_format_in_use[i] = True
                        if not additional_format.is_multiple:
                            limit_reached = True
            matches.append(
                FileMatch(
                    matched_files_dict,
                    file_info.format_name,
                    False,
                    is_valid))
        # Archives become single-file matches, assumed valid; their
        # contents are matched later after extraction.
        for file_info in archive_formats:
            matches.append(
                FileMatch(
                    {file_info.file_id: file_info.full_name},
                    file_info.format_name, True, True))
        # Unknown extensions still get a (flagged-invalid) match entry.
        for file_info in unknown_format_files:
            matches.append(
                FileMatch(
                    {file_info.file_id: file_info.full_name},
                    file_info.format_name, False, False))
        _resolve_name_conflicts(matches)
        return matches
# Per-file record produced by _get_extended_file_lists: the upload ID, the
# full file name, its base-name/extension split, whether the extension is an
# archive type, and the OGR format name looked up from the extension.
ExtendedFileList = namedtuple(
    'ExtendedFileList',
    ['file_id', 'full_name', 'file_name', 'file_extension', 'is_archive',
     'format_name'])
def _get_extended_file_lists(file_dict):
    '''
    Enriches each entry of a file dictionary with derived information
    (base name, extension, format name, whether it is an archive) and
    classifies it. Returns four lists of ExtendedFileList records:
    OGR-format files, additional (companion) files, archive files and
    files of unknown format.
    '''
    ogr_format_files = []
    additional_format_files = []
    archive_formats = []
    unknown_format_files = []
    for file_id, full_name in file_dict.items():
        base_name, raw_extension = os.path.splitext(full_name)
        # splitext keeps the leading separator ('.ext') - strip it off.
        extension = raw_extension.lstrip(os.path.extsep)
        is_archive = archives.is_archive_file_extension(extension)
        entry = ExtendedFileList(
            file_id,
            full_name,
            base_name,
            extension,
            is_archive,
            OgrFormat.get_ogr_name_by_file_extension(extension))
        # Classification priority: main OGR format wins over companion
        # extension, which wins over archive, which wins over unknown.
        if OgrFormat.contains_extension(extension):
            ogr_format_files.append(entry)
        elif AdditionalOgrFormat.contains_extension(extension):
            additional_format_files.append(entry)
        elif is_archive:
            archive_formats.append(entry)
        else:
            unknown_format_files.append(entry)
    return ogr_format_files, additional_format_files, archive_formats, unknown_format_files
def _resolve_name_conflicts(matches):
    '''
    Renames the files of any valid match whose base name collides with a
    later match, appending '_<number>' before the extension. Mutates the
    FileMatch objects in ``matches`` in place; returns nothing.
    '''
    # Stores a list of the file names (without file extension) of every file
    # match
    file_names = []
    for i in range(len(matches)):
        files = list(matches[i].get_file_dict().values())
        # The first file's base name represents the whole match.
        file_name = os.path.splitext(files[0])[0]
        file_names.append(file_name)
    for i in range(len(matches)):
        # A match is a duplicate if any *later* match shares its base name;
        # the last occurrence therefore keeps its original name.
        found_double = False
        for j in range(i + 1, len(matches)):
            if file_names[j] == file_names[i]:
                found_double = True
        # Only rename if valid (known) format
        if found_double and matches[i].is_valid():
            base_name = file_names[i] + '_'
            file_number = _get_free_file_name_number(base_name, file_names, 2)
            # Record the new base name so later iterations see it taken.
            file_names[i] = base_name + str(file_number)
            # rename_file only reassigns existing keys (never adds or
            # removes), so iterating the items view while renaming is safe.
            for file in matches[i].get_file_dict().items():
                file_name = os.path.splitext(file[1])[0]
                file_extension = os.path.splitext(
                    file[1])[1].lstrip(
                    os.path.extsep)
                matches[i].rename_file(
                    file[0],
                    file_name +
                    '_' +
                    str(file_number) +
                    '.' +
                    file_extension)
def _get_free_file_name_number(base_name, existing_names, start_number=2):
for i in range(start_number, 100):
# for i in range(start_number, int.-----------):
new_found = True
for j in range(len(existing_names)):
if base_name + str(i) == existing_names[j]:
new_found = False
if new_found:
break
return i
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Type of the optional ``cls`` response hook accepted by every operation:
# a callable receiving the raw pipeline response, the deserialized body and
# a dict of response headers; when supplied, its return value becomes the
# operation's result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations:
    """LocalNetworkGatewaysOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: auto-generated by AutoRest - hand edits will be lost on
    # regeneration (see the file header).
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        parameters: "_models.LocalNetworkGateway",
        **kwargs: Any
    ) -> "_models.LocalNetworkGateway":
        # Raw PUT that starts the create/update long-running operation;
        # polling is driven by begin_create_or_update.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry a gateway body.
        if response.status_code == 200:
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        parameters: "_models.LocalNetworkGateway",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.LocalNetworkGateway"]:
        """Creates or updates a local network gateway in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :param parameters: Parameters supplied to the create or update local network gateway operation.
        :type parameters: ~azure.mgmt.network.v2019_12_01.models.LocalNetworkGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either LocalNetworkGateway or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_12_01.models.LocalNetworkGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # With a continuation token the initial PUT is skipped and the
        # poller resumes from the saved state below.
        if cont_token is None:
            # cls=lambda passes the raw PipelineResponse through so the
            # poller can inspect it; deserialization happens in
            # get_long_running_output.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                local_network_gateway_name=local_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        **kwargs: Any
    ) -> "_models.LocalNetworkGateway":
        """Gets the specified local network gateway in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LocalNetworkGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_12_01.models.LocalNetworkGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        **kwargs: Any
    ) -> None:
        # Raw DELETE that starts the delete LRO; 200/202/204 are all
        # accepted as a successful start. Polled by begin_delete.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified local network gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # With a continuation token the initial DELETE is skipped and the
        # poller resumes from the saved state below.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                local_network_gateway_name=local_network_gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the optional response hook.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def update_tags(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.LocalNetworkGateway":
        """Updates a local network gateway tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :param parameters: Parameters supplied to update local network gateway tags.
        :type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LocalNetworkGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_12_01.models.LocalNetworkGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        # PATCH with a TagsObject body only; synchronous (no LRO poller).
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.LocalNetworkGatewayListResult"]:
        """Gets all the local network gateways in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.LocalNetworkGatewayListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGatewayListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Builds either the first-page request or a follow-up request
            # from the service-provided next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is used verbatim; no extra query parameters added.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'}  # type: ignore
| |
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add 'day' and alter 'modified' on qa_testlistinstance."""
        # Adding field 'TestListInstance.day'
        db.add_column('qa_testlistinstance', 'day',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)
        # Changing field 'TestListInstance.modified'
        # Plain DateTimeField here vs. auto_now=True in backwards() - this
        # direction drops the auto_now behavior from the column definition.
        db.alter_column('qa_testlistinstance', 'modified', self.gf('django.db.models.fields.DateTimeField')())
# Changing field 'TestListInstance.modified'
db.alter_column('qa_testlistinstance', 'modified', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Deleting field 'TestListInstance.day'
db.delete_column('qa_testlistinstance', 'day')
# Changing field 'TestListInstance.modified'
db.alter_column('qa_testlistinstance', 'modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'qa.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'})
},
'qa.frequency': {
'Meta': {'ordering': "('nominal_interval',)", 'object_name': 'Frequency'},
'due_interval': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'nominal_interval': ('django.db.models.fields.PositiveIntegerField', [], {}),
'overdue_interval': ('django.db.models.fields.PositiveIntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'qa.reference': {
'Meta': {'object_name': 'Reference'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reference_creators'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reference_modifiers'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'numerical'", 'max_length': '15'}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'qa.test': {
'Meta': {'object_name': 'Test'},
'calculation_procedure': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Category']"}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'constant_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_creator'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_modifier'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'procedure': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'simple'", 'max_length': '10'})
},
'qa.testinstance': {
'Meta': {'object_name': 'TestInstance'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_instance_creator'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_instance_modifier'", 'to': "orm['auth.User']"}),
'pass_fail': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'reference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Reference']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'review_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'reviewed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'skipped': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestInstanceStatus']"}),
'string_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'test_list_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestListInstance']", 'null': 'True', 'blank': 'True'}),
'tolerance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Tolerance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'unit_test_info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.UnitTestInfo']"}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'work_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'work_started': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'qa.testinstancestatus': {
'Meta': {'object_name': 'TestInstanceStatus'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'export_by_default': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'requires_review': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'qa.testlist': {
'Meta': {'object_name': 'TestList'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qa_testlist_created'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qa_testlist_modified'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'sublists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['qa.TestList']", 'null': 'True', 'blank': 'True'}),
'tests': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['qa.Test']", 'through': "orm['qa.TestListMembership']", 'symmetrical': 'False'})
},
'qa.testlistcycle': {
'Meta': {'object_name': 'TestListCycle'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qa_testlistcycle_created'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qa_testlistcycle_modified'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'test_lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['qa.TestList']", 'through': "orm['qa.TestListCycleMembership']", 'symmetrical': 'False'})
},
'qa.testlistcyclemembership': {
'Meta': {'ordering': "('order',)", 'object_name': 'TestListCycleMembership'},
'cycle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestListCycle']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'test_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestList']"})
},
'qa.testlistinstance': {
'Meta': {'object_name': 'TestListInstance'},
'all_reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_list_instance_creator'", 'to': "orm['auth.User']"}),
'day': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_list_instance_modifier'", 'to': "orm['auth.User']"}),
'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'reviewed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'test_list_instance_reviewer'", 'null': 'True', 'to': "orm['auth.User']"}),
'test_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestList']"}),
'unit_test_collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.UnitTestCollection']"}),
'work_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'work_started': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'qa.testlistmembership': {
'Meta': {'ordering': "('order',)", 'unique_together': "(('test_list', 'test'),)", 'object_name': 'TestListMembership'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Test']"}),
'test_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestList']"})
},
'qa.tolerance': {
'Meta': {'ordering': "['type', 'act_low', 'tol_low', 'tol_high', 'act_high']", 'object_name': 'Tolerance'},
'act_high': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'act_low': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tolerance_creators'", 'to': "orm['auth.User']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mc_pass_choices': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'mc_tol_choices': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tolerance_modifiers'", 'to': "orm['auth.User']"}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'tol_high': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tol_low': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'qa.unittestcollection': {
'Meta': {'unique_together': "(('unit', 'frequency', 'content_type', 'object_id'),)", 'object_name': 'UnitTestCollection'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True'}),
'auto_schedule': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Frequency']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestListInstance']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['units.Unit']"}),
'visible_to': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[]', 'related_name': "'test_collection_visibility'", 'symmetrical': 'False', 'to': "orm['auth.Group']"})
},
'qa.unittestinfo': {
'Meta': {'unique_together': "(['test', 'unit'],)", 'object_name': 'UnitTestInfo'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Reference']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Test']"}),
'tolerance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Tolerance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['units.Unit']"})
},
'units.modality': {
'Meta': {'unique_together': "[('type', 'energy')]", 'object_name': 'Modality'},
'energy': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'units.unit': {
'Meta': {'ordering': "['number']", 'object_name': 'Unit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'install_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'modalities': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['units.Modality']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'serial_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['units.UnitType']"})
},
'units.unittype': {
'Meta': {'unique_together': "[('name', 'model')]", 'object_name': 'UnitType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['qa']
| |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
from datetime import datetime, timedelta
import pytest
from azure.core import MatchConditions
from azure.core.credentials import AzureSasCredential
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, \
ClientAuthenticationError, ResourceModifiedError
from azure.storage.filedatalake import ContentSettings, generate_account_sas, generate_file_sas, \
ResourceTypes, AccountSasPermissions, \
DataLakeFileClient, FileSystemClient, DataLakeDirectoryClient, FileSasPermissions, generate_file_system_sas, \
FileSystemSasPermissions
from azure.storage.filedatalake import DataLakeServiceClient
from settings.testcase import DataLakePreparer
from devtools_testutils.storage import StorageTestCase
# ------------------------------------------------------------------------------
# Prefixes used by get_resource_name() to generate recorded resource names.
TEST_DIRECTORY_PREFIX = 'directory'
TEST_FILE_PREFIX = 'file'
# Local scratch file used by download-to-file tests.
FILE_PATH = 'file_output.temp.dat'
# ------------------------------------------------------------------------------
class FileTest(StorageTestCase):
def _setUp(self, account_name, account_key):
    """Build the Data Lake service client and ensure the test file system exists.

    Not unittest's ``setUp``: the account credentials come from the
    ``DataLakePreparer`` decorator, so each test invokes this explicitly.
    In playback mode the file system creation is skipped (it is part of
    the recording).
    """
    url = self.account_url(account_name, 'dfs')
    self.dsc = DataLakeServiceClient(url, credential=account_key, logging_enable=True)
    self.config = self.dsc._config

    self.file_system_name = self.get_resource_name('filesystem')

    if not self.is_playback():
        # Live run: create the shared file system, tolerating reruns that
        # left it behind.
        file_system = self.dsc.get_file_system_client(self.file_system_name)
        try:
            file_system.create_file_system(timeout=5)
        except ResourceExistsError:
            pass
def tearDown(self):
    """Best-effort cleanup: delete the file system created by ``_setUp``.

    Deletion failures are ignored so that cleanup never masks the real
    test result; the base class tearDown is always invoked.
    """
    if not self.is_playback():
        try:
            self.dsc.delete_file_system(self.file_system_name)
        except Exception:
            # Bug fix: the original bare 'except:' also swallowed
            # BaseException (KeyboardInterrupt/SystemExit); only service
            # errors should be ignored here.
            pass

    return super(FileTest, self).tearDown()
# --Helpers-----------------------------------------------------------------
def _get_directory_reference(self, prefix=TEST_DIRECTORY_PREFIX):
    """Return a recording-stable unique directory name starting with *prefix*."""
    return self.get_resource_name(prefix)
def _get_file_reference(self, prefix=TEST_FILE_PREFIX):
    """Return a recording-stable unique file name starting with *prefix*."""
    return self.get_resource_name(prefix)
def _create_file_system(self):
    """Create and return a brand-new file system client.

    NOTE(review): ``_get_file_system_reference`` is not defined anywhere
    in this class as far as visible here — presumably inherited from the
    test base class; verify before relying on this helper.
    """
    return self.dsc.create_file_system(self._get_file_system_reference())
def _create_directory_and_return_client(self, directory=None):
    """Create *directory* (or a freshly generated one) and return its client."""
    name = directory or self._get_directory_reference()
    client = self.dsc.get_directory_client(self.file_system_name, name)
    client.create_directory()
    return client
def _create_file_and_return_client(self, directory="", file=None):
    """Create a file (optionally beneath *directory*) and return its client.

    If *directory* is given it is created first; if *file* is omitted a
    generated name is used.

    NOTE(review): with the default ``directory=""`` the path becomes
    ``'/<file>'`` (leading slash); the service appears to accept this in
    the recorded runs — confirm before changing the concatenation.
    """
    if directory:
        self._create_directory_and_return_client(directory)

    if not file:
        file = self._get_file_reference()
    file_client = self.dsc.get_file_client(self.file_system_name, directory + '/' + file)
    file_client.create_file()
    return file_client
# --Helpers-----------------------------------------------------------------
@DataLakePreparer()
def test_create_file(self, datalake_storage_account_name, datalake_storage_account_key):
    """Creating a file inside a fresh directory returns a non-None response."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)

    # Arrange: a brand-new directory to host the file.
    dir_name = self._get_directory_reference()
    dir_client = self.dsc.get_directory_client(self.file_system_name, dir_name)
    dir_client.create_directory()

    # Act
    resp = dir_client.get_file_client('filename').create_file()

    # Assert
    self.assertIsNotNone(resp)
@DataLakePreparer()
def test_file_exists(self, datalake_storage_account_name, datalake_storage_account_key):
    """exists() reports True for a created file and False for a missing one."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange: one client for a real file, one for a name never created.
    directory_name = self._get_directory_reference()

    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()

    file_client1 = directory_client.get_file_client('filename')
    file_client2 = directory_client.get_file_client('nonexistentfile')
    file_client1.create_file()

    self.assertTrue(file_client1.exists())
    self.assertFalse(file_client2.exists())
@DataLakePreparer()
def test_create_file_using_oauth_token_credential(self, datalake_storage_account_name, datalake_storage_account_key):
    """A DataLakeFileClient built with an OAuth token credential can create a file."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    file_name = self._get_file_reference()
    token_credential = self.generate_oauth_token()

    # Build a standalone file client (file system root) using the token
    # credential instead of the account key.
    file_client = DataLakeFileClient(self.dsc.url, self.file_system_name, file_name,
                                     credential=token_credential)

    response = file_client.create_file()

    # Assert
    self.assertIsNotNone(response)
@DataLakePreparer()
def test_create_file_with_existing_name(self, datalake_storage_account_name, datalake_storage_account_key):
    """Creating with MatchConditions.IfMissing fails when the file already exists."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    file_client = self._create_file_and_return_client()

    with self.assertRaises(ResourceExistsError):
        # IfMissing translates to if_none_match='*', i.e. "only create if
        # no file with this name exists" -- so this must raise.
        file_client.create_file(match_condition=MatchConditions.IfMissing)
@DataLakePreparer()
def test_create_file_with_lease_id(self, datalake_storage_account_name, datalake_storage_account_key):
    """A leased file can be re-created when the lease is supplied to create_file."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_name = self._get_directory_reference()
    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()
    file_client = directory_client.get_file_client('filename')

    # Act: create, lease it, then re-create under the lease.
    file_client.create_file()
    lease = file_client.acquire_lease()
    create_resp = file_client.create_file(lease=lease)

    # Assert: the properties reflect the second (leased) creation.
    file_properties = file_client.get_file_properties()
    self.assertIsNotNone(file_properties)
    self.assertEqual(file_properties.etag, create_resp.get('etag'))
    self.assertEqual(file_properties.last_modified, create_resp.get('last_modified'))
@DataLakePreparer()
def test_create_file_under_root_directory(self, datalake_storage_account_name, datalake_storage_account_key):
    """A file addressed directly on the file system (no directory) can be created."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)

    # Act: target the file under the file-system root and create it.
    created = self.dsc.get_file_client(self.file_system_name, "filename").create_file()

    # Assert
    self.assertIsNotNone(created)
@DataLakePreparer()
def test_append_data(self, datalake_storage_account_name, datalake_storage_account_key):
    """append_data on a fresh file returns a non-None response."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_name = self._get_directory_reference()
    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()
    file_client = directory_client.get_file_client('filename')
    file_client.create_file()

    # Act: append 3 bytes at offset 0 (data is staged, not yet flushed).
    response = file_client.append_data(b'abc', 0, 3)
    self.assertIsNotNone(response)
@DataLakePreparer()
def test_append_empty_data(self, datalake_storage_account_name, datalake_storage_account_key):
    """Flushing with no appended data succeeds and leaves a zero-length file."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    file_client = self._create_file_and_return_client()

    # Act: flush at offset 0 without appending anything.
    file_client.flush_data(0)
    file_props = file_client.get_file_properties()

    # Bug fix: the original called assertIsNotNone(file_props['size'], 0),
    # which treats 0 as the failure *message* and never verifies the size.
    self.assertEqual(file_props['size'], 0)
@DataLakePreparer()
def test_flush_data(self, datalake_storage_account_name, datalake_storage_account_key):
    """Appending 3 bytes and flushing commits them: file size becomes 3."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_name = self._get_directory_reference()
    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()
    file_client = directory_client.get_file_client('filename')
    file_client.create_file()

    # Act: stage 3 bytes, then flush to commit them.
    file_client.append_data(b'abc', 0, 3)
    response = file_client.flush_data(3)

    # Assert
    prop = file_client.get_file_properties()
    self.assertIsNotNone(response)
    self.assertEqual(prop['size'], 3)
@DataLakePreparer()
def test_flush_data_with_match_condition(self, datalake_storage_account_name, datalake_storage_account_key):
    """flush_data with IfNotModified honors the etag captured at creation time."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_name = self._get_directory_reference()
    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()
    file_client = directory_client.get_file_client('filename')
    resp = file_client.create_file()

    # Act
    file_client.append_data(b'abc', 0, 3)

    # flush is successful because the file is untouched since creation;
    # flushing changes the etag.
    response = file_client.flush_data(3, etag=resp['etag'], match_condition=MatchConditions.IfNotModified)

    file_client.append_data(b'abc', 3, 3)
    with self.assertRaises(ResourceModifiedError):
        # flush is unsuccessful: the first flush already modified the file,
        # so the creation-time etag no longer matches.
        file_client.flush_data(6, etag=resp['etag'], match_condition=MatchConditions.IfNotModified)
@pytest.mark.live_test_only
@DataLakePreparer()
def test_upload_data_to_none_existing_file(self, datalake_storage_account_name, datalake_storage_account_key):
    """upload_data creates a not-yet-existing file and round-trips the bytes.

    Live-only: the parallel (max_concurrency=3) upload cannot be recorded.
    """
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_name = self._get_directory_reference()
    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()
    file_client = directory_client.get_file_client('filename')
    data = self.get_random_bytes(200*1024)

    # Act: upload in parallel; overwrite=True lets upload_data create the file.
    file_client.upload_data(data, overwrite=True, max_concurrency=3)

    downloaded_data = file_client.download_file().readall()
    self.assertEqual(data, downloaded_data)
@pytest.mark.live_test_only
@DataLakePreparer()
def test_upload_data_in_substreams(self, datalake_storage_account_name, datalake_storage_account_key):
    """Chunked (substream) upload round-trips both in parallel and single-threaded.

    Live-only: the parallel upload cannot be recorded.
    """
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_name = self._get_directory_reference()
    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()
    file_client = directory_client.get_file_client('filename')
    # Get 16MB data
    data = self.get_random_bytes(16*1024*1024)
    # Ensure chunk size is greater than threshold (8MB > 4MB) - for optimized upload
    file_client.upload_data(data, chunk_size=8*1024*1024, overwrite=True, max_concurrency=3)
    downloaded_data = file_client.download_file().readall()
    self.assertEqual(data, downloaded_data)

    # Run on single thread
    file_client.upload_data(data, chunk_size=8*1024*1024, overwrite=True)
    downloaded_data = file_client.download_file().readall()
    self.assertEqual(data, downloaded_data)
@DataLakePreparer()
def test_upload_data_to_existing_file(self, datalake_storage_account_name, datalake_storage_account_key):
    """upload_data over an existing file fails unless overwrite=True is passed."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_name = self._get_directory_reference()
    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()

    # Create an existing file with 3 committed bytes.
    file_client = directory_client.get_file_client('filename')
    file_client.create_file()
    file_client.append_data(b"abc", 0)
    file_client.flush_data(3)

    # Act: without overwrite the upload must be rejected...
    data = self.get_random_bytes(100)
    with self.assertRaises(HttpResponseError):
        file_client.upload_data(data, max_concurrency=5)
    # ...and with overwrite=True it replaces the content.
    file_client.upload_data(data, overwrite=True, max_concurrency=5)

    downloaded_data = file_client.download_file().readall()
    self.assertEqual(data, downloaded_data)
@DataLakePreparer()
def test_upload_data_to_existing_file_with_content_settings(self, datalake_storage_account_name, datalake_storage_account_key):
    """upload_data with an etag precondition applies the given ContentSettings."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_name = self._get_directory_reference()
    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()

    # Create an existing (empty) file and capture its etag for the
    # IfNotModified precondition below.
    file_client = directory_client.get_file_client('filename')
    etag = file_client.create_file()['etag']

    # Act: overwrite via etag match, attaching content settings.
    data = self.get_random_bytes(100)
    content_settings = ContentSettings(
        content_language='spanish',
        content_disposition='inline')
    file_client.upload_data(data, max_concurrency=5,
                            content_settings=content_settings, etag=etag,
                            match_condition=MatchConditions.IfNotModified)

    downloaded_data = file_client.download_file().readall()
    properties = file_client.get_file_properties()

    self.assertEqual(data, downloaded_data)
    self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
@DataLakePreparer()
def test_upload_data_to_existing_file_with_permission_and_umask(self, datalake_storage_account_name, datalake_storage_account_key):
    """Overwriting upload honors POSIX permissions/umask together with an etag match condition."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    directory_name = self._get_directory_reference()

    # Create a directory to put the file under
    directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name)
    directory_client.create_directory()

    # Create an existing file and capture its etag for the conditional overwrite
    file_client = directory_client.get_file_client('filename')
    etag = file_client.create_file()['etag']

    # Overwrite the file with explicit octal permissions and umask
    data = self.get_random_bytes(100)
    file_client.upload_data(data, overwrite=True, max_concurrency=5,
                            permissions='0777', umask="0000",
                            etag=etag,
                            match_condition=MatchConditions.IfNotModified)

    downloaded_data = file_client.download_file().readall()
    prop = file_client.get_access_control()

    # Assert: content round-trips and octal 0777 maps to rwxrwxrwx
    self.assertEqual(data, downloaded_data)
    self.assertEqual(prop['permissions'], 'rwxrwxrwx')
@DataLakePreparer()
def test_read_file(self, datalake_storage_account_name, datalake_storage_account_key):
    """Round-trip: bytes appended and flushed to a file come back unchanged from download_file()."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    client = self._create_file_and_return_client()

    payload = self.get_random_bytes(1024)
    size = len(payload)

    # Commit the payload to the file
    client.append_data(payload, 0, size)
    client.flush_data(size)

    # Download and compare against what was uploaded
    self.assertEqual(payload, client.download_file().readall())
@pytest.mark.live_test_only
@DataLakePreparer()
def test_read_file_with_user_delegation_key(self, datalake_storage_account_name, datalake_storage_account_key):
    """A file SAS signed with a user delegation key grants read access to the file."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # SAS URL is calculated from storage key, so this test runs live only

    # Create file
    file_client = self._create_file_and_return_client()
    data = self.get_random_bytes(1024)

    # Upload data to file
    file_client.append_data(data, 0, len(data))
    file_client.flush_data(len(data))

    # Get a user delegation key via an OAuth token credential
    token_credential = self.generate_oauth_token()
    service_client = DataLakeServiceClient(self.account_url(datalake_storage_account_name, 'dfs'), credential=token_credential, logging_enable=True)
    user_delegation_key = service_client.get_user_delegation_key(datetime.utcnow(),
                                                                 datetime.utcnow() + timedelta(hours=1))

    sas_token = generate_file_sas(file_client.account_name,
                                  file_client.file_system_name,
                                  None,
                                  file_client.path_name,
                                  user_delegation_key,
                                  permission=FileSasPermissions(read=True, create=True, write=True, delete=True),
                                  expiry=datetime.utcnow() + timedelta(hours=1),
                                  )

    # Download the data with the SAS and make sure it matches the upload
    new_file_client = DataLakeFileClient(self.account_url(datalake_storage_account_name, 'dfs'),
                                         file_client.file_system_name,
                                         file_client.path_name,
                                         credential=sas_token, logging_enable=True)
    downloaded_data = new_file_client.download_file().readall()
    self.assertEqual(data, downloaded_data)
@pytest.mark.live_test_only
@DataLakePreparer()
def test_set_acl_with_user_delegation_key(self, datalake_storage_account_name, datalake_storage_account_key):
    """A user-delegation SAS with manage_access_control/manage_ownership can set ACL and owner."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # SAS URL is calculated from storage key, so this test runs live only

    # Create file
    file_client = self._create_file_and_return_client()
    data = self.get_random_bytes(1024)

    # Upload data to file
    file_client.append_data(data, 0, len(data))
    file_client.flush_data(len(data))

    # Get a user delegation key via an OAuth token credential
    token_credential = self.generate_oauth_token()
    service_client = DataLakeServiceClient(self.account_url(datalake_storage_account_name, 'dfs'), credential=token_credential)
    user_delegation_key = service_client.get_user_delegation_key(datetime.utcnow(),
                                                                 datetime.utcnow() + timedelta(hours=1))

    sas_token = generate_file_sas(file_client.account_name,
                                  file_client.file_system_name,
                                  None,
                                  file_client.path_name,
                                  user_delegation_key,
                                  permission=FileSasPermissions(execute=True, manage_access_control=True,
                                                                manage_ownership=True),
                                  expiry=datetime.utcnow() + timedelta(hours=1),
                                  )

    # Build a client authenticated only with the user-delegation SAS
    new_file_client = DataLakeFileClient(self.account_url(datalake_storage_account_name, 'dfs'),
                                         file_client.file_system_name,
                                         file_client.path_name,
                                         credential=sas_token)
    acl = 'user::rwx,group::r-x,other::rwx'
    owner = "dc140949-53b7-44af-b1e9-cd994951fb86"
    new_file_client.set_access_control(acl=acl, owner=owner)

    # Both the ACL and the owner must have been applied
    access_control = new_file_client.get_access_control()
    self.assertEqual(acl, access_control['acl'])
    self.assertEqual(owner, access_control['owner'])
@pytest.mark.live_test_only
@DataLakePreparer()
def test_preauthorize_user_with_user_delegation_key(self, datalake_storage_account_name, datalake_storage_account_key):
    """A user-delegation SAS preauthorized for the file owner's object id lets that user manage access."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # SAS URL is calculated from storage key, so this test runs live only

    # Create file
    file_client = self._create_file_and_return_client()
    data = self.get_random_bytes(1024)

    # Upload data to file
    file_client.append_data(data, 0, len(data))
    file_client.flush_data(len(data))

    # Make the AAD object id below the owner of the file
    file_client.set_access_control(owner="68390a19-a643-458b-b726-408abf67b4fc", permissions='0777')
    acl = file_client.get_access_control()

    # Get a user delegation key via an OAuth token credential
    token_credential = self.generate_oauth_token()
    service_client = DataLakeServiceClient(self.account_url(datalake_storage_account_name, 'dfs'), credential=token_credential)
    user_delegation_key = service_client.get_user_delegation_key(datetime.utcnow(),
                                                                 datetime.utcnow() + timedelta(hours=1))

    # SAS preauthorized for the same object id that owns the file
    sas_token = generate_file_sas(file_client.account_name,
                                  file_client.file_system_name,
                                  None,
                                  file_client.path_name,
                                  user_delegation_key,
                                  permission=FileSasPermissions(read=True, write=True, manage_access_control=True,
                                                                manage_ownership=True),
                                  expiry=datetime.utcnow() + timedelta(hours=1),
                                  preauthorized_agent_object_id="68390a19-a643-458b-b726-408abf67b4fc"
                                  )

    # The preauthorized user can change permissions through the SAS
    new_file_client = DataLakeFileClient(self.account_url(datalake_storage_account_name, 'dfs'),
                                         file_client.file_system_name,
                                         file_client.path_name,
                                         credential=sas_token)
    acl = new_file_client.set_access_control(permissions='0777')
    self.assertIsNotNone(acl)
@DataLakePreparer()
def test_read_file_into_file(self, datalake_storage_account_name, datalake_storage_account_key):
    """download_file().readinto() writes the complete content into a local stream."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    file_client = self._create_file_and_return_client()
    data = self.get_random_bytes(1024)

    # upload data to file
    file_client.append_data(data, 0, len(data))
    file_client.flush_data(len(data))

    # download the data into a local file and make sure it matches the upload
    with open(FILE_PATH, 'wb') as stream:
        download = file_client.download_file(max_concurrency=2)
        download.readinto(stream)

    # Assert
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
        self.assertEqual(data, actual)
@DataLakePreparer()
def test_read_file_to_text(self, datalake_storage_account_name, datalake_storage_account_key):
    """Text uploaded to a file is returned intact when downloaded with utf-8 decoding."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    client = self._create_file_and_return_client()

    text = self.get_random_text_data(1024)
    length = len(text)

    # Commit the text payload
    client.append_data(text, 0, length)
    client.flush_data(length)

    # Download as decoded text and verify the round trip
    round_trip = client.download_file(max_concurrency=2, encoding="utf-8").readall()
    self.assertEqual(text, round_trip)
@pytest.mark.live_test_only
@DataLakePreparer()
def test_account_sas(self, datalake_storage_account_name, datalake_storage_account_key):
    """An account SAS with read-only permission allows reads but rejects writes."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # SAS URL is calculated from storage key, so this test runs live only
    file_name = self._get_file_reference()

    # create a file under root directory
    self._create_file_and_return_client(file=file_name)

    # generate a token with file level read permission only
    token = generate_account_sas(
        self.dsc.account_name,
        self.dsc.credential.account_key,
        ResourceTypes(file_system=True, object=True),
        AccountSasPermissions(read=True),
        datetime.utcnow() + timedelta(hours=1),
    )

    # The raw token string and an AzureSasCredential wrapper must behave the same
    for credential in [token, AzureSasCredential(token)]:
        # read the created file which is under root directory
        file_client = DataLakeFileClient(self.dsc.url, self.file_system_name, file_name, credential=credential)
        properties = file_client.get_file_properties()

        # make sure we can read the file properties
        self.assertIsNotNone(properties)

        # writing must fail because the SAS grants read only
        with self.assertRaises(HttpResponseError):
            file_client.append_data(b"abcd", 0, 4)
@DataLakePreparer()
def test_account_sas_raises_if_sas_already_in_uri(self, datalake_storage_account_name, datalake_storage_account_key):
    """A client URL that already carries a SAS plus an AzureSasCredential must raise ValueError."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    sas_url = self.dsc.url + "?sig=foo"
    with self.assertRaises(ValueError):
        DataLakeFileClient(sas_url, self.file_system_name, "foo", credential=AzureSasCredential("?foo=bar"))
@pytest.mark.live_test_only
@DataLakePreparer()
def test_file_sas_only_applies_to_file_level(self, datalake_storage_account_name, datalake_storage_account_key):
    """A file-scoped SAS works on the file but is rejected at file-system and directory scope."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # SAS URL is calculated from storage key, so this test runs live only
    file_name = self._get_file_reference()
    directory_name = self._get_directory_reference()
    self._create_file_and_return_client(directory=directory_name, file=file_name)

    # generate a token with file level read and write permissions
    token = generate_file_sas(
        self.dsc.account_name,
        self.file_system_name,
        directory_name,
        file_name,
        self.dsc.credential.account_key,
        permission=FileSasPermissions(read=True, write=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # read the created file which is under root directory
    file_client = DataLakeFileClient(self.dsc.url, self.file_system_name, directory_name+'/'+file_name,
                                     credential=token)
    properties = file_client.get_file_properties()

    # make sure we can read the file properties
    self.assertIsNotNone(properties)

    # try to write to the created file with the token
    response = file_client.append_data(b"abcd", 0, 4, validate_content=True)
    self.assertIsNotNone(response)

    # the token is file-scoped, so file-system level operations must be denied
    file_system_client = FileSystemClient(self.dsc.url, self.file_system_name, credential=token)
    with self.assertRaises(ClientAuthenticationError):
        file_system_client.get_file_system_properties()

    # the token is file-scoped, so directory level operations must be denied
    directory_client = DataLakeDirectoryClient(self.dsc.url, self.file_system_name, directory_name,
                                               credential=token)
    with self.assertRaises(ClientAuthenticationError):
        directory_client.get_directory_properties()
@DataLakePreparer()
def test_delete_file(self, datalake_storage_account_name, datalake_storage_account_key):
    """Deleting a file makes subsequent property reads fail with ResourceNotFoundError."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)

    client = self._create_file_and_return_client()
    client.delete_file()

    # The file is gone, so reading its properties must fail
    with self.assertRaises(ResourceNotFoundError):
        client.get_file_properties()
@DataLakePreparer()
def test_delete_file_with_if_unmodified_since(self, datalake_storage_account_name, datalake_storage_account_key):
    """delete_file guarded by If-Unmodified-Since succeeds when the file is untouched."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)

    client = self._create_file_and_return_client()
    last_modified = client.get_file_properties()['last_modified']

    # The file has not changed since the captured timestamp, so this deletes it
    client.delete_file(if_unmodified_since=last_modified)

    # Make sure the file was deleted
    with self.assertRaises(ResourceNotFoundError):
        client.get_file_properties()
@DataLakePreparer()
def test_set_access_control(self, datalake_storage_account_name, datalake_storage_account_key):
    """set_access_control with octal permissions succeeds and returns a response."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    client = self._create_file_and_return_client()

    result = client.set_access_control(permissions='0777')

    # Assert
    self.assertIsNotNone(result)
@DataLakePreparer()
def test_set_access_control_with_match_conditions(self, datalake_storage_account_name, datalake_storage_account_key):
    """set_access_control with MatchConditions.IfMissing on an existing file must fail."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    client = self._create_file_and_return_client()

    # IfMissing can never hold for a file that was just created
    with self.assertRaises(ResourceModifiedError):
        client.set_access_control(permissions='0777', match_condition=MatchConditions.IfMissing)
@DataLakePreparer()
def test_get_access_control(self, datalake_storage_account_name, datalake_storage_account_key):
    """get_access_control returns a non-empty result after permissions were set."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    client = self._create_file_and_return_client()
    client.set_access_control(permissions='0777')

    # Act + Assert
    self.assertIsNotNone(client.get_access_control())
@DataLakePreparer()
def test_get_access_control_with_if_modified_since(self, datalake_storage_account_name, datalake_storage_account_key):
    """get_access_control guarded by an If-Modified-Since in the past succeeds."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    file_client = self._create_file_and_return_client()
    file_client.set_access_control(permissions='0777')
    prop = file_client.get_file_properties()

    # Act: the file changed after (last_modified - 15min), so the conditional read passes
    response = file_client.get_access_control(if_modified_since=prop['last_modified']-timedelta(minutes=15))

    # Assert
    self.assertIsNotNone(response)
@DataLakePreparer()
def test_set_access_control_recursive(self, datalake_storage_account_name, datalake_storage_account_key):
    """Recursive ACL set on a single file reports exactly one successful file and applies the ACL."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    acl = 'user::rwx,group::r-x,other::rwx'
    file_client = self._create_file_and_return_client()

    summary = file_client.set_access_control_recursive(acl=acl)

    # Assert: one file touched, no directories, no failures
    self.assertEqual(summary.counters.directories_successful, 0)
    self.assertEqual(summary.counters.files_successful, 1)
    self.assertEqual(summary.counters.failure_count, 0)
    access_control = file_client.get_access_control()
    self.assertIsNotNone(access_control)
    self.assertEqual(acl, access_control['acl'])
@DataLakePreparer()
def test_update_access_control_recursive(self, datalake_storage_account_name, datalake_storage_account_key):
    """Recursive ACL update on a single file reports exactly one successful file and applies the ACL."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    acl = 'user::rwx,group::r-x,other::rwx'
    file_client = self._create_file_and_return_client()

    summary = file_client.update_access_control_recursive(acl=acl)

    # Assert: one file touched, no directories, no failures
    self.assertEqual(summary.counters.directories_successful, 0)
    self.assertEqual(summary.counters.files_successful, 1)
    self.assertEqual(summary.counters.failure_count, 0)
    access_control = file_client.get_access_control()
    self.assertIsNotNone(access_control)
    self.assertEqual(acl, access_control['acl'])
@DataLakePreparer()
def test_remove_access_control_recursive(self, datalake_storage_account_name, datalake_storage_account_key):
    """Recursive ACL removal on a single file reports exactly one successful file."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # ACL entries (mask, named user/group and their defaults) to remove
    acl = "mask," + "default:user,default:group," + \
          "user:ec3595d6-2c17-4696-8caa-7e139758d24a,group:ec3595d6-2c17-4696-8caa-7e139758d24a," + \
          "default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,default:group:ec3595d6-2c17-4696-8caa-7e139758d24a"
    file_client = self._create_file_and_return_client()

    summary = file_client.remove_access_control_recursive(acl=acl)

    # Assert: one file touched, no directories, no failures
    self.assertEqual(summary.counters.directories_successful, 0)
    self.assertEqual(summary.counters.files_successful, 1)
    self.assertEqual(summary.counters.failure_count, 0)
@DataLakePreparer()
def test_get_properties(self, datalake_storage_account_name, datalake_storage_account_key):
    """get_file_properties reflects size, metadata and content settings set at creation."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_client = self._create_directory_and_return_client()

    metadata = {'hello': 'world', 'number': '42'}
    content_settings = ContentSettings(
        content_language='spanish',
        content_disposition='inline')
    file_client = directory_client.create_file("newfile", metadata=metadata, content_settings=content_settings)
    file_client.append_data(b"abc", 0, 3)
    file_client.flush_data(3)
    properties = file_client.get_file_properties()

    # Assert
    self.assertTrue(properties)
    self.assertEqual(properties.size, 3)
    self.assertEqual(properties.metadata['hello'], metadata['hello'])
    self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
@DataLakePreparer()
def test_set_expiry(self, datalake_storage_account_name, datalake_storage_account_key):
    """set_file_expiry with an absolute time is reflected in the file's expiry_time property."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # Arrange
    directory_client = self._create_directory_and_return_client()

    metadata = {'hello': 'world', 'number': '42'}
    content_settings = ContentSettings(
        content_language='spanish',
        content_disposition='inline')
    expires_on = datetime.utcnow() + timedelta(hours=1)
    file_client = directory_client.create_file("newfile", metadata=metadata, content_settings=content_settings)
    file_client.set_file_expiry("Absolute", expires_on=expires_on)
    properties = file_client.get_file_properties()

    # Assert
    self.assertTrue(properties)
    self.assertIsNotNone(properties.expiry_time)
@DataLakePreparer()
def test_rename_file_with_non_used_name(self, datalake_storage_account_name, datalake_storage_account_key):
    """Renaming to an unused name moves the content and updates the client's path."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    client = self._create_file_and_return_client()

    payload = b"abc"
    client.append_data(payload, 0, 3)
    client.flush_data(3)

    # Rename within the same file system
    renamed = client.rename_file(client.file_system_name + '/' + 'newname')

    self.assertEqual(renamed.download_file().readall(), payload)
    self.assertEqual(renamed.path_name, "newname")
@pytest.mark.live_test_only
@DataLakePreparer()
def test_rename_file_with_file_system_sas(self, datalake_storage_account_name, datalake_storage_account_key):
    """A file-system scoped SAS with read/write/delete permission can rename a file."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # sas token is calculated from storage key, so live only
    token = generate_file_system_sas(
        self.dsc.account_name,
        self.file_system_name,
        self.dsc.credential.account_key,
        FileSystemSasPermissions(write=True, read=True, delete=True),
        datetime.utcnow() + timedelta(hours=1),
    )

    # create a file under the root directory using the SAS
    file_client = DataLakeFileClient(self.dsc.url, self.file_system_name, "oldfile", credential=token)
    file_client.create_file()
    data_bytes = b"abc"
    file_client.append_data(data_bytes, 0, 3)
    file_client.flush_data(3)

    # rename it within the same file system; content and path must follow
    new_client = file_client.rename_file(file_client.file_system_name+'/'+'newname')
    data = new_client.download_file().readall()
    self.assertEqual(data, data_bytes)
    self.assertEqual(new_client.path_name, "newname")
@pytest.mark.live_test_only
@DataLakePreparer()
def test_rename_file_with_file_sas(self, datalake_storage_account_name, datalake_storage_account_key):
    """Renaming with file-scoped SAS tokens: the destination carries its own SAS appended to the path."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # SAS URL is calculated from storage key, so this test runs live only

    # SAS for the source file
    token = generate_file_sas(self.dsc.account_name,
                              self.file_system_name,
                              None,
                              "oldfile",
                              datalake_storage_account_key,
                              permission=FileSasPermissions(read=True, create=True, write=True, delete=True),
                              expiry=datetime.utcnow() + timedelta(hours=1),
                              )

    # Separate SAS for the rename destination
    new_token = generate_file_sas(self.dsc.account_name,
                                  self.file_system_name,
                                  None,
                                  "newname",
                                  datalake_storage_account_key,
                                  permission=FileSasPermissions(read=True, create=True, write=True, delete=True),
                                  expiry=datetime.utcnow() + timedelta(hours=1),
                                  )

    # create a file under the root directory using the source SAS
    file_client = DataLakeFileClient(self.dsc.url, self.file_system_name, "oldfile", credential=token)
    file_client.create_file()
    data_bytes = b"abc"
    file_client.append_data(data_bytes, 0, 3)
    file_client.flush_data(3)

    # destination path carries its own SAS as a query string
    new_client = file_client.rename_file(file_client.file_system_name+'/'+'newname'+'?'+new_token)
    data = new_client.download_file().readall()
    self.assertEqual(data, data_bytes)
    self.assertEqual(new_client.path_name, "newname")
@DataLakePreparer()
def test_rename_file_with_account_sas(self, datalake_storage_account_name, datalake_storage_account_key):
    """Rename a file using an account SAS. Currently skipped due to a service bug."""
    # Skip before doing any account setup so the preparer/_setUp work is not wasted
    # (the skip previously ran after _setUp).
    pytest.skip("service bug")
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    token = generate_account_sas(
        self.dsc.account_name,
        self.dsc.credential.account_key,
        ResourceTypes(object=True),
        AccountSasPermissions(write=True, read=True, create=True, delete=True),
        datetime.utcnow() + timedelta(hours=5),
    )

    # create a file under the root directory using the account SAS
    file_client = DataLakeFileClient(self.dsc.url, self.file_system_name, "oldfile", credential=token)
    file_client.create_file()
    data_bytes = b"abc"
    file_client.append_data(data_bytes, 0, 3)
    file_client.flush_data(3)

    # rename it within the same file system; content and path must follow
    new_client = file_client.rename_file(file_client.file_system_name+'/'+'newname')
    data = new_client.download_file().readall()
    self.assertEqual(data, data_bytes)
    self.assertEqual(new_client.path_name, "newname")
@DataLakePreparer()
def test_rename_file_to_existing_file(self, datalake_storage_account_name, datalake_storage_account_key):
    """Renaming onto an existing file overwrites the destination's content.

    (Removed the unused `old_url`/`new_url` locals of the original version.)
    """
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)

    # create the destination file with some content
    existing_file_client = self._create_file_and_return_client(file="existingfile")
    existing_file_client.append_data(b"a", 0, 1)
    existing_file_client.flush_data(1)

    # prepare the source file that will be renamed onto the destination
    file_client = self._create_file_and_return_client()
    data_bytes = b"abc"
    file_client.append_data(data_bytes, 0, 3)
    file_client.flush_data(3)

    new_client = file_client.rename_file(file_client.file_system_name+'/'+existing_file_client.path_name)
    data = new_client.download_file().readall()

    # the existing file was overridden with the source content
    self.assertEqual(data, data_bytes)
@DataLakePreparer()
def test_rename_file_will_not_change_existing_directory(self, datalake_storage_account_name, datalake_storage_account_key):
    """Renaming a file across directories moves only that file; sibling files stay intact."""
    self._setUp(datalake_storage_account_name, datalake_storage_account_key)
    # create a non-empty directory (with 2 files)
    dir1 = self._create_directory_and_return_client(directory="dir1")
    f1 = dir1.create_file("file1")
    f1.append_data(b"file1", 0, 5)
    f1.flush_data(5)
    f2 = dir1.create_file("file2")
    f2.append_data(b"file2", 0, 5)
    f2.flush_data(5)

    # create another non-empty directory (with 2 files)
    dir2 = self._create_directory_and_return_client(directory="dir2")
    f3 = dir2.create_file("file3")
    f3.append_data(b"file3", 0, 5)
    f3.flush_data(5)
    f4 = dir2.create_file("file4")
    f4.append_data(b"file4", 0, 5)
    f4.flush_data(5)

    # move dir2/file3 onto dir1/file1; the destination now holds file3's content
    new_client = f3.rename_file(f1.file_system_name+'/'+f1.path_name)
    self.assertEqual(new_client.download_file().readall(), b"file3")

    # make sure the data in file2 and file4 weren't touched
    f2_data = f2.download_file().readall()
    self.assertEqual(f2_data, b"file2")
    f4_data = f4.download_file().readall()
    self.assertEqual(f4_data, b"file4")

    # the source path no longer exists after the rename
    with self.assertRaises(HttpResponseError):
        f3.download_file().readall()
# ------------------------------------------------------------------------------
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# encoding=utf8
import os
import random
import numpy as np
import FukuML.Utility as utility
import FukuML.MLBase as ml
random.seed(1)
class NeuralNetwork(ml.Learner):
    """Feed-forward neural network with tanh activations, trained by
    stochastic gradient descent with backpropagation.

    Typical flow: load_train_data() -> set_param() -> init_W() -> train()
    -> prediction().
    """

    def __init__(self):
        """init"""
        self.status = 'empty'            # lifecycle: empty -> load_train_data -> init -> train
        self.train_X = []
        self.train_Y = []
        self.W = []                      # list of per-layer weight matrices
        self.data_num = 0
        self.data_demension = 0          # (sic) input feature count incl. bias column
        self.test_X = []
        self.test_Y = []
        self.feature_transform_mode = ''
        self.feature_transform_degree = 1
        self.network_structure = []      # hidden/output layer sizes
        self.w_range_high = 0.1          # weight init range, upper bound
        self.w_range_low = -0.1          # weight init range, lower bound
        self.feed_mode = 'stochastic'
        self.step_eta = 0.01             # SGD learning rate
        self.updates = 50000             # number of SGD updates

    def load_train_data(self, input_data_file=''):
        """Load training data (bundled dataset when no path is given).

        Returns (train_X, train_Y).
        """
        self.status = 'load_train_data'
        if (input_data_file == ''):
            input_data_file = os.path.normpath(os.path.join(os.path.join(os.getcwd(), os.path.dirname(__file__)), "dataset/neural_network_train.dat"))
        else:
            if (os.path.isfile(input_data_file) is not True):
                print("Please make sure input_data_file path is correct.")
                return self.train_X, self.train_Y
        self.train_X, self.train_Y = utility.DatasetLoader.load(input_data_file)
        return self.train_X, self.train_Y

    def load_test_data(self, input_data_file=''):
        """Load test data, re-applying the configured feature transform if any.

        Returns (test_X, test_Y).
        """
        if (input_data_file == ''):
            input_data_file = os.path.normpath(os.path.join(os.path.join(os.getcwd(), os.path.dirname(__file__)), "dataset/neural_network_test.dat"))
        else:
            if (os.path.isfile(input_data_file) is not True):
                print("Please make sure input_data_file path is correct.")
                return self.test_X, self.test_Y
        self.test_X, self.test_Y = utility.DatasetLoader.load(input_data_file)
        if (self.feature_transform_mode == 'polynomial') or (self.feature_transform_mode == 'legendre'):
            # Strip the bias column before applying the feature transform
            self.test_X = self.test_X[:, 1:]
            self.test_X = utility.DatasetLoader.feature_transform(
                self.test_X,
                self.feature_transform_mode,
                self.feature_transform_degree
            )
        return self.test_X, self.test_Y

    def set_param(self, network_structure=None, w_range_high=0.1, w_range_low=-0.1, feed_mode='stochastic', step_eta=0.01, updates=50000):
        """Configure training hyper-parameters; returns them as a tuple.

        network_structure defaults to [8, 3, 1]. The list is copied so
        neither the caller's list nor a shared default object can be mutated
        later (the previous signature used the mutable default [8, 3, 1],
        which is shared across calls and instances and was corrupted by
        init_W's in-place insert).
        """
        if network_structure is None:
            network_structure = [8, 3, 1]
        self.network_structure = list(network_structure)
        self.w_range_high = w_range_high
        self.w_range_low = w_range_low
        self.feed_mode = feed_mode
        self.step_eta = step_eta
        self.updates = updates
        return self.network_structure, self.w_range_high, self.w_range_low, self.feed_mode, self.step_eta, self.updates

    def init_W(self):
        """Randomly initialize per-layer weights in [w_range_low, w_range_high].

        Each matrix has shape (fan_in + 1, fan_out); the extra row is the
        bias weight. W is rebuilt from scratch and network_structure is no
        longer mutated, so repeated calls are idempotent (the previous
        version appended extra layers to W and prepended the input width to
        network_structure on every call).
        """
        if (self.status != 'load_train_data') and (self.status != 'train'):
            print("Please load train data first.")
            return self.W
        self.status = 'init'
        self.data_num = len(self.train_Y)
        self.data_demension = len(self.train_X[0])
        # Full layer layout: input width (features without the bias column),
        # then the configured hidden/output sizes.
        layer_sizes = [self.data_demension - 1] + list(self.network_structure)
        self.W = []
        for fan_in, fan_out in zip(layer_sizes, layer_sizes[1:]):
            self.W.append(np.random.uniform(self.w_range_low, self.w_range_high, (fan_in + 1, fan_out)))
        return self.W

    def score_function(self, x, W):
        """Forward the sample x (x[0] is the bias term, stripped here)
        through the network; returns the scalar output of the last layer."""
        y_predict = x[1:]
        for layer_w in W:
            # Prepend the bias input 1 before each layer's affine transform
            y_predict = np.tanh(np.dot(np.hstack((1, y_predict)), layer_w))
        return y_predict[0]

    def error_function(self, y_prediction, y_truth):
        """Squared error."""
        return (y_prediction - y_truth) ** 2

    def calculate_avg_error(self, X, Y, W):
        """Average error over (X, Y); delegates to the base learner."""
        return super(NeuralNetwork, self).calculate_avg_error(X, Y, W)

    def calculate_test_data_avg_error(self):
        """Average error over the loaded test set; delegates to the base learner."""
        return super(NeuralNetwork, self).calculate_test_data_avg_error()

    def tanh_prime(self, s):
        """Element-wise derivative of tanh: 4 / (e^{2s} + e^{-2s} + 2).

        Vectorized (the previous version looped over the elements in Python).
        """
        s = np.asarray(s, dtype=float)
        return 4.0 / (np.exp(2 * s) + np.exp(-2 * s) + 2)

    def forward_process(self, x, y, W):
        """Forward pass; returns the activation vector of every layer.

        y is unused but kept for signature symmetry with backward_process.
        """
        forward_output = []
        pre_x = x
        for layer_w in W:
            pre_x = np.tanh(np.dot(pre_x, layer_w))
            forward_output.append(pre_x)
            pre_x = np.hstack((1, pre_x))  # re-add the bias input for the next layer
        return forward_output

    def backward_process(self, x, y, W, neuron_output):
        """Backward pass; returns the per-layer error gradients (deltas),
        ordered from the first layer to the last."""
        backward_output = []
        layer_num = len(neuron_output)
        # Output layer delta: derivative of squared error through tanh
        score = np.dot(np.hstack((1, neuron_output[layer_num - 2])), W[layer_num - 1])
        error_gradient = np.array([-2 * (y - neuron_output[layer_num - 1][0]) * self.tanh_prime(score)])
        backward_output.insert(0, error_gradient)
        # Hidden layers, from the last hidden layer back to the first
        for i in range(layer_num - 2, -1, -1):
            if i == 0:
                score = np.dot(x, W[i])
            else:
                score = np.dot(np.hstack((1, neuron_output[i - 1])), W[i])
            # Propagate the next layer's delta back through its weights,
            # skipping the bias row W[i + 1][0].
            error_gradient = np.dot(error_gradient, W[i + 1][1:].transpose()) * self.tanh_prime(score)
            backward_output.insert(0, error_gradient)
        return backward_output

    def update_W_with_gradient_decent(self, x, neuron_output, error_gradient):
        """One SGD step: returns the new weight list W - eta * outer(input, delta)."""
        w_output = []
        layer_num = len(self.W)
        # First layer is fed by the raw sample x (which carries its own bias term)
        w_output.append(self.W[0] - self.step_eta * np.array([x]).transpose() * error_gradient[0])
        for i in range(1, layer_num, 1):
            # Later layers are fed by the previous layer's activations plus bias 1
            w_output.append(self.W[i] - self.step_eta * np.array([np.hstack((1, neuron_output[i - 1]))]).transpose() * error_gradient[i])
        return w_output

    def train(self):
        """Run self.updates SGD steps, each on one randomly chosen training
        sample; returns the trained weights."""
        if (self.status != 'init'):
            print("Please load train data and init W first.")
            return self.W
        self.status = 'train'
        for _ in range(0, self.updates):
            stochastic_i = random.randint(0, self.data_num - 1)
            x = self.train_X[stochastic_i]
            y = self.train_Y[stochastic_i]
            neuron_output = self.forward_process(x, y, self.W)
            error_gradient = self.backward_process(x, y, self.W, neuron_output)
            self.W = self.update_W_with_gradient_decent(x, neuron_output, error_gradient)
        return self.W

    def prediction(self, input_data='', mode='test_data'):
        """Predict with the trained network; delegates to the base learner."""
        return super(NeuralNetwork, self).prediction(input_data, mode)
class BinaryClassifier(NeuralNetwork):
    """Binary classifier built on NeuralNetwork: thresholds the network
    output with sign() and scores with 0/1 error.

    The previous version duplicated NeuralNetwork.__init__ verbatim and
    re-declared every inherited method as a pure super() delegation; the
    duplicate init now calls super() and the redundant overrides are removed
    so the parent implementations are inherited directly (behavior is
    unchanged).
    """

    def __init__(self):
        """init"""
        super(BinaryClassifier, self).__init__()

    def score_function(self, x, W):
        """Raw network output squashed to a class label via np.sign
        (-1.0 / 0.0 / 1.0; 0 only for an exactly-zero raw score)."""
        score = super(BinaryClassifier, self).score_function(x, W)
        return np.sign(score)

    def error_function(self, y_prediction, y_truth):
        """0/1 classification error."""
        if y_prediction != y_truth:
            return 1
        else:
            return 0
| |
from __future__ import absolute_import, print_function, division
from pony.py23compat import itervalues, basestring
from operator import attrgetter
from pony.orm import core
from pony.orm.core import log_sql, DBSchemaError
from pony.utils import throw
class DBSchema(object):
    """Database-agnostic description of a schema (tables, constraints) plus
    the logic to generate and execute its CREATE commands.

    NOTE: following pony's internal convention, methods use ``schema`` as
    the name of the first (self) argument.
    """
    dialect = None              # overridden by provider-specific subclasses
    inline_fk_syntax = True
    named_foreign_keys = True

    def __init__(schema, provider, uppercase=True):
        schema.provider = provider
        schema.tables = {}          # table name -> Table
        schema.constraints = {}
        schema.indent = '  '
        schema.command_separator = ';\n\n'
        schema.uppercase = uppercase  # emit SQL keywords upper-case when True
        schema.names = {}           # every named object, for clash detection

    def column_list(schema, columns):
        """Render a parenthesized, comma-separated list of quoted column names."""
        quote_name = schema.provider.quote_name
        return '(%s)' % ', '.join(quote_name(column.name) for column in columns)

    def case(schema, s):
        # Upper-case the SQL template while restoring printf-style
        # placeholders ('%s', '%r' and — presumably — the tails of
        # '%(name)s'/'%(name)r', hence the ')S'/')R' replacements; TODO
        # confirm), which must stay lower case to survive later %-formatting.
        if schema.uppercase: return s.upper().replace('%S', '%s') \
            .replace(')S', ')s').replace('%R', '%r').replace(')R', ')r')
        else: return s.lower()

    def add_table(schema, table_name):
        """Create (and register) a new table in this schema."""
        return schema.table_class(table_name, schema)

    def order_tables_to_create(schema):
        """Return the tables ordered so that each table comes after the
        tables it references; when no table has all of its parents created
        (an FK cycle), an arbitrary member is taken to break the cycle.
        Sorting by name first keeps the result deterministic."""
        tables = []
        created_tables = set()
        tables_to_create = sorted(itervalues(schema.tables), key=lambda table: table.name)
        while tables_to_create:
            for table in tables_to_create:
                if table.parent_tables.issubset(created_tables):
                    created_tables.add(table)
                    tables_to_create.remove(table)
                    break
            # No candidate found: FK cycle — pop one arbitrarily
            else: table = tables_to_create.pop()
            tables.append(table)
        return tables

    def generate_create_script(schema):
        """Return the full DDL script that creates every object in the schema."""
        created_tables = set()
        commands = []
        for table in schema.order_tables_to_create():
            for db_object in table.get_objects_to_create(created_tables):
                commands.append(db_object.get_create_command())
        return schema.command_separator.join(commands)

    def create_tables(schema, provider, connection):
        """Create every missing schema object in the database.

        An object that already exists under a different letter case is an
        error: the database would treat it as the same name, shadowing the
        object about to be created.
        """
        created_tables = set()
        for table in schema.order_tables_to_create():
            for db_object in table.get_objects_to_create(created_tables):
                name = db_object.exists(provider, connection, case_sensitive=False)
                if name is None: db_object.create(provider, connection)
                elif name != db_object.name:
                    quote_name = schema.provider.quote_name
                    n1, n2 = quote_name(db_object.name), quote_name(name)
                    tn1, tn2 = db_object.typename, db_object.typename.lower()
                    throw(DBSchemaError, '%s %s cannot be created, because %s %s ' \
                        '(with a different letter case) already exists in the database. ' \
                        'Try to delete %s %s first.' % (tn1, n1, tn2, n2, n2, tn2))

    def check_tables(schema, provider, connection):
        """Verify that every table and column exists by executing a
        guaranteed-empty SELECT (WHERE 0 = 1) against each table."""
        cursor = connection.cursor()
        for table in sorted(itervalues(schema.tables), key=lambda table: table.name):
            if isinstance(table.name, tuple): alias = table.name[-1]
            elif isinstance(table.name, basestring): alias = table.name
            else: assert False  # pragma: no cover
            sql_ast = [ 'SELECT',
                        [ 'ALL', ] + [ [ 'COLUMN', alias, column.name ] for column in table.column_list ],
                        [ 'FROM', [ alias, 'TABLE', table.name ] ],
                        [ 'WHERE', [ 'EQ', [ 'VALUE', 0 ], [ 'VALUE', 1 ] ] ]
                      ]
            sql, adapter = provider.ast2sql(sql_ast)
            if core.debug: log_sql(sql)
            provider.execute(cursor, sql)
class DBObject(object):
    """Common base for schema objects (tables, indexes, foreign keys) that
    know how to emit and execute their own CREATE statement."""
    def create(table, provider, connection):
        """Execute this object's CREATE command on the given connection."""
        command = table.get_create_command()
        if core.debug:
            log_sql(command)
        cursor = connection.cursor()
        provider.execute(cursor, command)
class Table(DBObject):
    """A database table: columns, indexes, foreign keys, and its links to
    parent/child tables. Registers itself in the schema on creation."""
    typename = 'Table'
    def __init__(table, name, schema):
        if name in schema.tables:
            throw(DBSchemaError, "Table %r already exists in database schema" % name)
        if name in schema.names:
            throw(DBSchemaError, "Table %r cannot be created, name is already in use" % name)
        schema.tables[name] = table
        schema.names[name] = table
        table.schema = schema
        table.name = name            # str, or tuple for a qualified name
        table.column_list = []       # Column objects in definition order
        table.column_dict = {}       # column name -> Column
        table.indexes = {}           # tuple of Columns -> DBIndex
        table.pk_index = None        # DBIndex of the primary key, if defined
        table.foreign_keys = {}      # tuple of child Columns -> ForeignKey
        table.parent_tables = set()  # tables referenced by this table's FKs
        table.child_tables = set()   # tables whose FKs reference this table
        table.entities = set()
        table.m2m = set()            # m2m attributes when this is a link table
    def __repr__(table):
        table_name = table.name
        if isinstance(table_name, tuple):
            table_name = '.'.join(table_name)
        return '<Table(%s)>' % table_name
    def exists(table, provider, connection, case_sensitive=True):
        return provider.table_exists(connection, table.name, case_sensitive)
    def get_create_command(table):
        """Render the complete CREATE TABLE statement for this table."""
        schema = table.schema
        case = schema.case
        provider = schema.provider
        quote_name = provider.quote_name
        if_not_exists = False # provider.table_if_not_exists_syntax and provider.index_if_not_exists_syntax
        cmd = []
        if not if_not_exists: cmd.append(case('CREATE TABLE %s (') % quote_name(table.name))
        else: cmd.append(case('CREATE TABLE IF NOT EXISTS %s (') % quote_name(table.name))
        for column in table.column_list:
            cmd.append(schema.indent + column.get_sql() + ',')
        # a composite primary key gets its own constraint line
        if len(table.pk_index.columns) > 1:
            cmd.append(schema.indent + table.pk_index.get_sql() + ',')
        # multi-column unique indexes are emitted inline as constraints
        indexes = [ index for index in itervalues(table.indexes)
                    if not index.is_pk and index.is_unique and len(index.columns) > 1 ]
        for index in indexes: assert index.name is not None
        indexes.sort(key=attrgetter('name'))
        for index in indexes: cmd.append(schema.indent+index.get_sql() + ',')
        if not schema.named_foreign_keys:
            for foreign_key in sorted(itervalues(table.foreign_keys), key=lambda fk: fk.name):
                # single-column FKs were already emitted inline in the column def
                if schema.inline_fk_syntax and len(foreign_key.child_columns) == 1: continue
                cmd.append(schema.indent+foreign_key.get_sql() + ',')
        cmd[-1] = cmd[-1][:-1]  # strip the trailing comma from the last item
        cmd.append(')')
        return '\n'.join(cmd)
    def get_objects_to_create(table, created_tables=None):
        """Return this table plus the secondary indexes and the named FKs
        that can be created now (an FK only once both of its tables exist).

        Mutates ``created_tables`` by adding this table.
        """
        if created_tables is None: created_tables = set()
        result = [ table ]
        # non-unique secondary indexes become separate CREATE INDEX statements
        indexes = [ index for index in itervalues(table.indexes) if not index.is_pk and not index.is_unique ]
        for index in indexes: assert index.name is not None
        indexes.sort(key=attrgetter('name'))
        result.extend(indexes)
        schema = table.schema
        if schema.named_foreign_keys:
            # FKs from this table to parents that already exist...
            for foreign_key in sorted(itervalues(table.foreign_keys), key=lambda fk: fk.name):
                if foreign_key.parent_table not in created_tables: continue
                result.append(foreign_key)
            # ...and FKs from already-created children back to this table
            for child_table in table.child_tables:
                if child_table not in created_tables: continue
                for foreign_key in sorted(itervalues(child_table.foreign_keys), key=lambda fk: fk.name):
                    if foreign_key.parent_table is not table: continue
                    result.append(foreign_key)
        created_tables.add(table)
        return result
    def add_column(table, column_name, sql_type, converter, is_not_null=None, sql_default=None):
        # Factory: Column registers itself in table.column_list/column_dict.
        return table.schema.column_class(column_name, table, sql_type, converter, is_not_null, sql_default)
    def add_index(table, index_name, columns, is_pk=False, is_unique=None, m2m=False):
        # index_name semantics: None -> generate a default name; True -> anonymous
        assert index_name is not False
        if index_name is True: index_name = None
        if index_name is None and not is_pk:
            provider = table.schema.provider
            index_name = provider.get_default_index_name(table.name, (column.name for column in columns),
                                                         is_pk=is_pk, is_unique=is_unique, m2m=m2m)
        index = table.indexes.get(columns)
        # reuse an identical existing index instead of raising
        if index and index.name == index_name and index.is_pk == is_pk and index.is_unique == is_unique:
            return index
        return table.schema.index_class(index_name, table, columns, is_pk, is_unique)
    def add_foreign_key(table, fk_name, child_columns, parent_table, parent_columns, index_name=None):
        if fk_name is None:
            provider = table.schema.provider
            child_column_names = tuple(column.name for column in child_columns)
            fk_name = provider.get_default_fk_name(table.name, parent_table.name, child_column_names)
        return table.schema.fk_class(fk_name, table, child_columns, parent_table, parent_columns, index_name)
class Column(object):
    """One table column; knows how to render its own DDL fragment."""
    auto_template = '%(type)s PRIMARY KEY AUTOINCREMENT'  # autoincrement-PK template
    def __init__(column, name, table, sql_type, converter, is_not_null=None, sql_default=None):
        if name in table.column_dict:
            throw(DBSchemaError, "Column %r already exists in table %r" % (name, table.name))
        table.column_dict[name] = column
        table.column_list.append(column)
        column.table = table
        column.name = name
        column.sql_type = sql_type
        column.converter = converter
        column.is_not_null = is_not_null
        column.sql_default = sql_default
        column.is_pk = False        # True or 'auto' for a single-column primary key
        column.is_pk_part = False   # part of a (possibly composite) primary key
        column.is_unique = False
    def __repr__(column):
        return '<Column(%s.%s)>' % (column.table.name, column.name)
    def get_sql(column):
        """Render the column-definition fragment used inside CREATE TABLE."""
        table = column.table
        schema = table.schema
        quote_name = schema.provider.quote_name
        case = schema.case
        result = []
        append = result.append
        append(quote_name(column.name))
        if column.is_pk == 'auto' and column.auto_template:
            # autoincrement template already includes PRIMARY KEY
            append(case(column.auto_template % dict(type=column.sql_type)))
        else:
            append(case(column.sql_type))
            if column.is_pk:
                # SQLite allows NULLs in non-INTEGER PKs unless stated explicitly
                if schema.dialect == 'SQLite': append(case('NOT NULL'))
                append(case('PRIMARY KEY'))
            else:
                if column.is_unique: append(case('UNIQUE'))
                if column.is_not_null: append(case('NOT NULL'))
        if column.sql_default not in (None, True, False):
            append(case('DEFAULT'))
            append(column.sql_default)
        if schema.inline_fk_syntax and not schema.named_foreign_keys:
            # inline single-column foreign-key reference, if one is defined
            foreign_key = table.foreign_keys.get((column,))
            if foreign_key is not None:
                parent_table = foreign_key.parent_table
                append(case('REFERENCES'))
                append(quote_name(parent_table.name))
                append(schema.column_list(foreign_key.parent_columns))
        return ' '.join(result)
class Constraint(DBObject):
    """Base for named schema constraints (indexes and foreign keys).

    A named constraint registers itself in both ``schema.names`` and
    ``schema.constraints``; anonymous constraints (name=None) are not
    registered anywhere.
    """
    def __init__(constraint, name, schema):
        if name is not None:
            assert name not in schema.names
            if name in schema.constraints:
                throw(DBSchemaError, "Constraint with name %r already exists" % name)
            schema.names[name] = constraint
            schema.constraints[name] = constraint
        constraint.schema = schema
        constraint.name = name
class DBIndex(Constraint):
    """An index (possibly the primary key) over a tuple of columns.

    Registers itself in ``table.indexes`` and updates the per-column
    is_pk/is_pk_part/is_unique flags.
    """
    typename = 'Index'
    def __init__(index, name, table, columns, is_pk=False, is_unique=None):
        assert len(columns) > 0
        for column in columns:
            if column.table is not table: throw(DBSchemaError,
                "Column %r does not belong to table %r and cannot be part of its index"
                % (column.name, table.name))
        if columns in table.indexes:
            if len(columns) == 1: throw(DBSchemaError, "Index for column %r already exists" % columns[0].name)
            else: throw(DBSchemaError, "Index for columns (%s) already exists" % ', '.join(repr(column.name) for column in columns))
        if is_pk:
            if table.pk_index is not None: throw(DBSchemaError,
                'Primary key for table %r is already defined' % table.name)
            table.pk_index = index
            # a primary key is unique by definition
            if is_unique is None: is_unique = True
            elif not is_unique: throw(DBSchemaError,
                "Incompatible combination of is_unique=False and is_pk=True")
        elif is_unique is None: is_unique = False
        schema = table.schema
        if name is not None and name in schema.names:
            # BUG FIX: the %s placeholder was previously left unfilled,
            # producing a literal '%s' in the error message
            throw(DBSchemaError, 'Index %s cannot be created, name is already in use' % name)
        Constraint.__init__(index, name, schema)
        for column in columns:
            column.is_pk = len(columns) == 1 and is_pk
            column.is_pk_part = bool(is_pk)
            column.is_unique = is_unique and len(columns) == 1
        table.indexes[columns] = index
        index.table = table
        index.columns = columns
        index.is_pk = is_pk
        index.is_unique = is_unique
    def exists(index, provider, connection, case_sensitive=True):
        return provider.index_exists(connection, index.table.name, index.name, case_sensitive)
    def get_sql(index):
        # constraint fragment used inside CREATE TABLE
        return index._get_create_sql(inside_table=True)
    def get_create_command(index):
        # standalone CREATE INDEX statement
        return index._get_create_sql(inside_table=False)
    def _get_create_sql(index, inside_table):
        schema = index.schema
        case = schema.case
        quote_name = schema.provider.quote_name
        cmd = []
        append = cmd.append
        if not inside_table:
            if index.is_pk: throw(DBSchemaError,
                'Primary key index cannot be defined outside of table definition')
            append(case('CREATE'))
            if index.is_unique: append(case('UNIQUE'))
            append(case('INDEX'))
            # if schema.provider.index_if_not_exists_syntax:
            #     append(case('IF NOT EXISTS'))
            append(quote_name(index.name))
            append(case('ON'))
            append(quote_name(index.table.name))
        else:
            if index.name:
                append(case('CONSTRAINT'))
                append(quote_name(index.name))
            if index.is_pk: append(case('PRIMARY KEY'))
            elif index.is_unique: append(case('UNIQUE'))
            else: append(case('INDEX'))
        append(schema.column_list(index.columns))
        return ' '.join(cmd)
class ForeignKey(Constraint):
    """A foreign-key constraint from child_table.child_columns to
    parent_table.parent_columns; may also create a supporting index."""
    typename = 'Foreign key'
    def __init__(foreign_key, name, child_table, child_columns, parent_table, parent_columns, index_name):
        schema = parent_table.schema
        if schema is not child_table.schema: throw(DBSchemaError,
            'Parent and child tables of foreign_key cannot belong to different schemata')
        for column in parent_columns:
            if column.table is not parent_table: throw(DBSchemaError,
                'Column %r does not belong to table %r' % (column.name, parent_table.name))
        for column in child_columns:
            if column.table is not child_table: throw(DBSchemaError,
                'Column %r does not belong to table %r' % (column.name, child_table.name))
        if len(parent_columns) != len(child_columns): throw(DBSchemaError,
            'Foreign key columns count do not match')
        if child_columns in child_table.foreign_keys:
            if len(child_columns) == 1: throw(DBSchemaError, 'Foreign key for column %r already defined' % child_columns[0].name)
            else: throw(DBSchemaError, 'Foreign key for columns (%s) already defined' % ', '.join(repr(column.name) for column in child_columns))
        if name is not None and name in schema.names:
            throw(DBSchemaError, 'Foreign key %s cannot be created, name is already in use' % name)
        Constraint.__init__(foreign_key, name, schema)
        child_table.foreign_keys[child_columns] = foreign_key
        # self-referencing FKs do not create a parent/child relationship
        if child_table is not parent_table:
            child_table.parent_tables.add(parent_table)
            parent_table.child_tables.add(child_table)
        foreign_key.parent_table = parent_table
        foreign_key.parent_columns = parent_columns
        foreign_key.child_table = child_table
        foreign_key.child_columns = child_columns
        if index_name is not False:
            child_columns_len = len(child_columns)
            # skip creating an index when an existing one already starts
            # with the FK columns
            for columns in child_table.indexes:
                if columns[:child_columns_len] == child_columns: break
            else: child_table.add_index(index_name, child_columns, is_pk=False,
                                        is_unique=False, m2m=bool(child_table.m2m))
    def exists(foreign_key, provider, connection, case_sensitive=True):
        return provider.fk_exists(connection, foreign_key.child_table.name, foreign_key.name, case_sensitive)
    def get_sql(foreign_key):
        # constraint fragment used inside CREATE TABLE
        return foreign_key._get_create_sql(inside_table=True)
    def get_create_command(foreign_key):
        # standalone ALTER TABLE ... ADD statement
        return foreign_key._get_create_sql(inside_table=False)
    def _get_create_sql(foreign_key, inside_table):
        schema = foreign_key.schema
        case = schema.case
        quote_name = schema.provider.quote_name
        cmd = []
        append = cmd.append
        if not inside_table:
            append(case('ALTER TABLE'))
            append(quote_name(foreign_key.child_table.name))
            append(case('ADD'))
        if schema.named_foreign_keys and foreign_key.name:
            append(case('CONSTRAINT'))
            append(quote_name(foreign_key.name))
        append(case('FOREIGN KEY'))
        append(schema.column_list(foreign_key.child_columns))
        append(case('REFERENCES'))
        append(quote_name(foreign_key.parent_table.name))
        append(schema.column_list(foreign_key.parent_columns))
        return ' '.join(cmd)
# Wire the concrete classes into DBSchema so provider subclasses can
# override any of them individually.
DBSchema.table_class = Table
DBSchema.column_class = Column
DBSchema.index_class = DBIndex
DBSchema.fk_class = ForeignKey
| |
import json
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.db.utils import IntegrityError
from django.shortcuts import HttpResponse, render
from django.shortcuts import render_to_response
from elgassia.settings import DEBUG
from main.models import MainMenu, StandardPage, Config
from main.utils.decorators import staff_member_required
@staff_member_required()
def main_menu_save(request):
    """Create/update/delete MainMenu entries from posted parallel lists.

    POST arrays (same length, aligned by position): idx[] ('-1' = new row),
    remove[] ('true' = delete), title[], dest_page[] ('new' = create a
    StandardPage on the fly), dest_url[].
    Responds with JSON {'success': bool, 'error': str}.
    """
    error = ''
    try:
        menu_db = MainMenu.objects
        idx = request.POST.getlist('idx[]')
        title = request.POST.getlist('title[]')
        dest_page = request.POST.getlist('dest_page[]')
        dest_url = request.POST.getlist('dest_url[]')
        remove = request.POST.getlist('remove[]')
        # entry = (idx, remove, title, dest_page, dest_url); pos becomes the
        # saved menu position of the row
        for pos, entry in enumerate(zip(idx, remove, title, dest_page, dest_url)):
            if entry[0] == '-1':
                if entry[1] == 'true':
                    continue  # new row already removed client-side: nothing to do
                k = MainMenu()
            else:
                k = menu_db.get(id=entry[0])
                if entry[1] == 'true':
                    k.delete()
                    continue
            k.position = pos
            if entry[3] == 'new':
                # NOTE(review): this rebinds the ``dest_page`` list read above;
                # harmless because zip() already captured the list, but confusing
                dest_page = StandardPage()
                dest_page.title = entry[2]
                dest_page.save()
                entry = list(entry)
                entry[3] = dest_page.id
            k.title, k.dest_page, k.dest_url = entry[2:]
            k.save()
    except Exception as e:
        # hide internal error details from users unless DEBUG is on
        if DEBUG:
            error += str(e)
        else:
            error += 'Saving Error'
    response = {'success': error == '', 'error': error}
    return HttpResponse(json.dumps(response), content_type='application/json')
@staff_member_required()
def page_list(request):
    """Render the admin list of all standard pages."""
    context = {'pages': StandardPage.objects.all()}
    return render(request, 'main/config/page_list.html', context)
@staff_member_required()
def page_list_save(request):
    """Create/update/delete StandardPage rows from posted parallel lists.

    POST arrays aligned by position: idx[] ('-1' = new), remove[] ('true' =
    delete), title[]. Responds with JSON {'success': bool, 'error': str}.
    """
    error = ''
    idx = request.POST.getlist('idx[]')
    title = request.POST.getlist('title[]')
    remove = request.POST.getlist('remove[]')
    for page_id, removed, page_title in zip(idx, remove, title):
        if page_id == '-1':
            if removed == 'true':
                continue  # new row deleted before ever being saved
            page = StandardPage()
        else:
            page = StandardPage.objects.get(id=page_id)
            if removed == 'true':
                page.delete()
                continue
        page.title = page_title
        page.save()
    response = {'success': error == '', 'error': error}
    return HttpResponse(json.dumps(response), content_type='application/json')
@staff_member_required()
def page_edit(request, idx):
    """Render the edit form for one StandardPage; 'next' falls back to home."""
    page = StandardPage.objects.get(id=idx)
    next_page = request.GET.get('next', '') or reverse('main:home')
    context = {'page': page, 'next_page': next_page}
    return render(request, 'main/config/page_edit.html', context)
@staff_member_required()
def page_edit_save(request):
    """Persist edited title/content of a StandardPage.

    POST params: idx, content, title.
    Responds with JSON {'success': bool, 'error': str}.
    """
    error = ''
    try:
        idx = request.POST['idx']
        content = request.POST['content']
        title = request.POST['title']
        page = StandardPage.objects.get(id=idx)
        page.content = content
        page.title = title
        page.save()
    except Exception as e:
        # str(e) instead of e.message: .message is deprecated since py2.6 and
        # absent on many exception types (and in Python 3), which would raise
        # AttributeError here and mask the original error
        error += str(e)
    response = {'success': error == '', 'error': error}
    return HttpResponse(json.dumps(response), content_type='application/json')
@staff_member_required()
def config_editor(request):
    """Render the key/value configuration editor."""
    context = {'configs': Config.objects.all()}
    return render(request, 'main/config/config_editor.html', context)
@staff_member_required()
def config_editor_save(request):
    """Create/update/delete Config rows from posted parallel lists.

    POST arrays aligned by position: idx[] ('-1' = new), remove[] ('true' =
    delete), key[], value[].
    Responds with JSON {'success': bool, 'error': str}.
    """
    error = ''
    try:
        idx = request.POST.getlist('idx[]')
        remove = request.POST.getlist('remove[]')
        key = request.POST.getlist('key[]')
        value = request.POST.getlist('value[]')
        for entry in zip(idx, remove, key, value):
            if entry[0] == "-1":
                if entry[1] == 'true':
                    continue  # new row deleted before ever being saved
                k = Config()
            else:
                k = Config.objects.get(id=entry[0])
                if entry[1] == 'true':
                    k.delete()
                    continue
            k.key, k.value = entry[2:]
            k.save()
    except IntegrityError as e:
        # str(e) instead of e.message: .message is deprecated and absent in
        # Python 3, which would raise AttributeError and hide the real error
        error += str(e)
    response = {'success': error == '', 'error': error}
    return HttpResponse(json.dumps(response), content_type='application/json')
@staff_member_required()
def user_list_view(request):
    """Render the admin user list for the active user model."""
    user_model = get_user_model()
    context = {'users': user_model.objects.all()}
    return render(request, 'main/config/user_list.html', context)
@staff_member_required()
def user_save(request):
error = ''
response = {
'removed': False,
'idx': None,
}
try:
idx = request.POST['idx']
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
remove = request.POST['remove']
User = get_user_model()
try:
user = None
if idx == '-1':
if remove != 'true':
user_data = {'username': username, 'email': email}
if password:
user_data['password'] = password
user = User.objects.create_user(**user_data)
else:
user = User.objects.get(id=idx)
if remove == 'true':
user.delete()
response['removed'] = True
else:
user.username = username
user.set_email(email)
if password:
user.set_password(password)
user.save()
response['idx'] = user.id
except (IntegrityError, ValueError, ) as e:
error += e.message
except Exception as e:
print e.message
print type(e)
response['success'] = error == ''
response['error'] = error
return HttpResponse(json.dumps(response), content_type='application/json')
| |
"""
Created on Thu Mar 24 08:18:04 2016
@author: npop
The windower takes the data for a decimation level
And a reference time to reference the window to
And then calculates number of windows
And gives access to windows
The important thing is that windows can be compared to each other
Can also choose to save a window
"""
import numpy as np
import math
from datetime import datetime, timedelta
# utils
from utilsIO import *
class Windower(object):
    """Splits one decimation level's time series into overlapping windows.

    Windows are referenced to a global reference time: global window index 0
    starts at refTime and successive windows start (winSize - winOlap)
    samples apart, so windows of different recordings can be compared by
    their global index.
    """
    ###################
    ### CONSTRUCTOR
    ##################
    def __init__(self, refTime, dataTime, data, fs, winSize, winOlap):
        """data: dict of channel name -> array of samples; fs: sample
        frequency [Hz]; winSize/winOlap: window length and overlap in
        samples; refTime/dataTime: datetimes of the global reference and of
        the first data sample."""
        self.data = data
        self.fs = fs
        self.winSize = winSize
        # -1 because the window is inclusive of its first sample
        self.winDuration = (winSize-1)/fs
        self.winOlap = winOlap
        self.chans = list(self.data.keys())
        self.numSamples = len(self.data[self.chans[0]])
        # refTime and dataTime are already datetime objects
        self.refTime = refTime
        self.dataTime = dataTime
        # warn when fewer than this many windows fit in the data
        self.minWindows = 3
        # compute offsets, first window time and per-window sample ranges
        self.initialiseWindows()

    ###################
    ### GET GENERAL INFO
    ##################
    def getWindowSize(self):
        return self.winSize

    def getWindowDuration(self):
        return self.winDuration

    def getOverlap(self):
        return self.winOlap

    def getNumWindows(self):
        return self.numWindows

    def getGlobalWindowOffset(self):
        # global index of the first window present in this data
        return self.winOffset

    def getFirstWindowTime(self):
        return self.firstWindowTime

    def getSampleFreq(self):
        return self.fs

    def getNumSamples(self):
        return self.numSamples

    def getRefTime(self):
        return self.refTime

    def getDataTime(self):
        return self.dataTime

    def getWindowSamples(self):
        # numpy array of [startSample, endSample] (both inclusive) per window
        return self.winSamples

    def getWindowTimes(self):
        """Return [globalIndex, startTime, endTime] for every window."""
        winTimes = []
        iW = 0
        for samples in self.getWindowSamples():
            start = samples[0]
            stop = samples[1]
            win = []
            # global index of this window
            win.append(self.getGlobalWindowOffset() + iW)
            # start and end times from the sample offsets
            deltaStart = timedelta(seconds=start/self.getSampleFreq())
            timeStart = self.getDataTime() + deltaStart
            deltaEnd = timedelta(seconds=stop/self.getSampleFreq())
            timeEnd = self.getDataTime() + deltaEnd
            win.append(timeStart)
            win.append(timeEnd)
            winTimes.append(win)
            iW = iW + 1
        return winTimes

    def getActiveWindows(self):
        return self.winActive

    def getWindowActive(self, iW):
        return self.winActive[iW]

    def getChans(self):
        return self.chans

    def getGlobalIndex(self, iW):
        # FIX: previously referenced the undefined name ``iWindow`` and
        # raised NameError on every call
        return iW + self.getGlobalWindowOffset()

    def getMinWindows(self):
        return self.minWindows

    ###################
    ### SETTERS
    ##################
    def setActiveWindows(self, winActive):
        self.winActive = winActive

    def setMinWindows(self, minWindows):
        self.minWindows = minWindows

    ###################
    ### GET DECIMATED DATA
    ### FOR THIS LEVEL
    ##################
    def getData(self, iWindow):
        """Return {chan: samples} for local window index iWindow."""
        winSamples = self.getWindowSamples()[iWindow]
        winData = {}
        for c in self.getChans():
            # +1 because numpy slicing excludes the stop index
            winData[c] = self.data[c][winSamples[0]:winSamples[1] + 1]
        return winData

    def getDataGlobal(self, iGlobal):
        """Return window data addressed by global window index."""
        iWindow = iGlobal - self.getGlobalWindowOffset()
        return self.getData(iWindow)

    ###################
    ### CALCULATE WINDOWS
    ### FOR THIS DATA
    ##################
    def initialiseWindows(self):
        """Compute the global window offset, the first window time and the
        per-window [start, end] sample ranges for this data."""
        deltaRefStart = self.getDataTime() - self.getRefTime()
        if deltaRefStart.total_seconds() < 0:
            # FIX: this used to call printInfo(...), which takes no
            # arguments and raised TypeError whenever the warning fired
            self.printWarning("Error: reference time is after start of recording. Stuff may go wrong!")
        # increment between successive window start times, in seconds
        winStartIncrement = 1.0*(self.getWindowSize() - self.getOverlap())/self.getSampleFreq()
        # number of windows started before this data began; the ceiling is
        # the global index of the first window fully inside the data
        self.winOffset = int(math.ceil(deltaRefStart.total_seconds()/winStartIncrement))
        # start time of the first global window found in this data
        offsetSeconds = self.winOffset*winStartIncrement
        self.firstWindowTime = self.getRefTime() + timedelta(seconds=offsetSeconds)
        # first sample of the first window
        deltaStart = self.firstWindowTime - self.getDataTime()
        sampleStart = deltaStart.total_seconds()*self.getSampleFreq()
        # enumerate [start, end] sample ranges (end inclusive of last sample)
        winStart = sampleStart
        winEnd = sampleStart + self.getWindowSize() - 1
        winStartOff = self.getWindowSize() - self.getOverlap()
        winSamples = []
        while winEnd < self.getNumSamples():
            winSamples.append([winStart, winEnd])
            winStart = winStart + winStartOff
            winEnd = winStart + self.getWindowSize() - 1
        self.numWindows = len(winSamples)
        # warn if the number of windows is small
        if self.getNumWindows() < self.getMinWindows():
            self.printWarning("Number of windows in data is small - consider stopping decimation")
        # save winSamples as an integer numpy array
        self.winSamples = np.array(winSamples, dtype=int)
        # all windows start out active
        self.winActive = np.ones(shape=(self.numWindows), dtype=bool)

    ###################
    ### DEBUG
    ##################
    def printInfo(self):
        self.printText("####################")
        self.printText("WINDOWER INFO BEGIN")
        self.printText("####################")
        self.printText("Sample freq. [Hz] = {:f}".format(self.getSampleFreq()))
        self.printText("Window size = {:d}".format(self.getWindowSize()))
        self.printText("Overlap size = {:d}".format(self.getOverlap()))
        self.printText("Window duration [s] = {:.3f}".format(self.getWindowDuration()))
        self.printText("Reference time {}".format(self.getRefTime()))
        self.printText("Data start time {}".format(self.getDataTime()))
        self.printText("Number of complete windows in data = {:d}".format(self.getNumWindows()))
        if self.getNumWindows() < self.getMinWindows():
            self.printText("Number of windows in data is small - consider stopping decimation")
        if self.getNumWindows() > 0:
            self.printText("Global index of first window from reference time = {}".format(self.getGlobalWindowOffset()))
            self.printText("First window starts at time {}, sample {:d}".format(self.getFirstWindowTime(), self.getWindowSamples()[0,0]))
        self.printText("####################")
        self.printText("WINDOWER INFO END")
        self.printText("####################")

    def printWindowTimes(self):
        winTimes = self.getWindowTimes()
        winSamples = self.getWindowSamples()
        self.printText("####################")
        self.printText("WINDOWER TIMES BEGIN")
        self.printText("####################")
        self.printText("NOTE: Sample ranges are inclusive, to get number of samples, use: sample end - sample start + 1")
        for win, winS in zip(winTimes, winSamples):
            self.printText("Global index = {:d}, start time = {}, end time = {}, start sample = {:d}, end sample = {:d}".format(win[0], win[1], win[2], winS[0], winS[1]))
        self.printText("####################")
        self.printText("WINDOWER TIMES END")
        self.printText("####################")

    def printText(self, infoStr):
        generalPrint("Windower Info", infoStr)

    def printWarning(self, warnStr):
        warningPrint("Windower Warning", warnStr)
| |
import tensorflow as tf
import numpy as np
import common
import matplotlib.pyplot as plt
from collections import OrderedDict
class ForwardModel(object):
    def __init__(self, state_size, action_size, rho=0.05, beta=0.3, encoding_size=50, batch_size=50, multi_layered_encoder=True, num_steps=1,
                 separate_encoders=True, merger=tf.mul, activation=tf.sigmoid, dropout_keep=0.5, lstm=False):
        """Build the forward model's parameters (GRU encoder + decoder).

        state_size/action_size: environment state/action dimensionality.
        rho, beta: sparsity target and penalty weight for the KL regulariser.
        encoding_size: width of the hidden/encoding layers.
        merger: op combining state and action embeddings (elementwise mul).
        NOTE: written against the pre-1.0 TensorFlow API (tf.mul etc.).
        """
        self.state_size = state_size
        self.action_size = action_size
        self.multi_layered_encoder = multi_layered_encoder
        self.separate_encoders = separate_encoders
        self.merger = merger
        self.num_steps = num_steps
        self.activation = activation
        self.dropout_keep = dropout_keep
        self.lstm = lstm
        self.arch_params = {
            # NOTE(review): both branches of this and/or are identical;
            # presumably the lstm case was meant to differ — confirm
            'input_dim': lstm and (state_size + action_size) or (state_size + action_size),
            'encoding_dim': encoding_size,
            'small_encoding_dim': 5,
            'output_dim': state_size
        }
        self.sparsity_params = {
            'rho': tf.constant(rho),
            'beta': tf.constant(beta)
        }
        self.training_params = {
            'lr': 1e-4,
            'batch_size': batch_size
        }
        # set all the necessary weights and biases according to the forward model structure
        self.weights = OrderedDict()
        self.weights.update(self.gru_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], "gru1"))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'decoder1'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['output_dim'], 'decoder2'))
        # self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['small_encoding_dim']*self.arch_params['output_dim'], 'multiheaded1'))
        # self.weights.update(self.tensor_linear_variables(self.arch_params['small_encoding_dim'],
        #                                                  self.arch_params['output_dim'], 1, 'multiheaded2'))
        self.weights.update(self.linear_variables(state_size, self.arch_params['encoding_dim'], 'encoder1_state'))
        self.weights.update(self.linear_variables(action_size, self.arch_params['encoding_dim'], 'encoder1_action'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'encoder2_state'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'encoder2_action'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'encoder3'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'encoder4'))
        #self.weights.update(self.bn_variables([1, self.arch_params['encoding_dim']], 'bn1'))
        #self.weights.update(self.bn_variables([1, self.arch_params['encoding_dim']], 'bn2'))
        # normalisation statistics, filled in by pretrain()
        self.states_normalizer = []
        self.actions_normalizer = []
        self.states_min = []
def gru_variables(self, hidden_size, input_size, name):
weights = OrderedDict()
weights[name+'_Wxr'] = self.weight_variable([input_size, hidden_size])
weights[name+'_Wxz'] = self.weight_variable([input_size, hidden_size])
weights[name+'_Wxh'] = self.weight_variable([input_size, hidden_size])
weights[name+'_Whr'] = self.weight_variable([hidden_size, hidden_size])
weights[name+'_Whz'] = self.weight_variable([hidden_size, hidden_size])
weights[name+'_Whh'] = self.weight_variable([hidden_size, hidden_size])
weights[name+'_br'] = self.bias_variable([1, hidden_size])
weights[name+'_bz'] = self.bias_variable([1, hidden_size])
weights[name+'_bh'] = self.bias_variable([1, hidden_size])
return weights
def bn_variables(self, size, name):
weights = OrderedDict()
weights[name+'_mean'] = tf.Variable(tf.constant(0.0, shape=size))
weights[name +'_variance'] = tf.Variable(tf.constant(1.0, shape=size))
weights[name + '_offset'] = tf.Variable(tf.constant(0.0, shape=size))
weights[name + '_scale'] = tf.Variable(tf.constant(1.0, shape=size))
return weights
    def tensor_linear_variables(self, input_width, input_depth, output_width, name):
        """Create per-depth linear weights/biases for a batched matmul head.

        NOTE(review): assigns into self.weights directly and returns an
        EMPTY dict; callers do self.weights.update(...) on the return value,
        which is a no-op — the registration happens only via the side
        effect. Confirm before refactoring.
        """
        weights = OrderedDict()
        self.weights[name+'_weights'] = self.weight_variable([input_depth, input_width, output_width])
        self.weights[name+'_biases'] = self.bias_variable([input_depth, 1, output_width])
        return weights
    def linear_variables(self, input_size, output_size, name):
        """Create a dense layer's weight matrix and bias row.

        NOTE(review): like tensor_linear_variables, this registers the
        variables in self.weights directly and returns an EMPTY dict, so
        callers' self.weights.update(...) is a no-op — confirm before
        refactoring.
        """
        weights = OrderedDict()
        self.weights[name+'_weights'] = self.weight_variable([input_size, output_size])
        self.weights[name+'_biases'] = self.bias_variable([1, output_size])
        return weights
def weight_variable(self, shape):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def bias_variable(self, shape):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
return tf.Variable(initial)
def kl_divergence(self, p, p_hat):
# returns KL(p||p_hat) - the kl divergence between two Bernoulli probability vectors p and p_hat
return p*(tf.log(p) - tf.log(p_hat)) + (1-p)*(tf.log(1-p)-tf.log(1-p_hat))
    def gru_layer(self, input, hidden, weights, name):
        """Single GRU step: returns the new hidden state.

        r/z are the reset/update gates; h_hat is the candidate state.
        """
        x, h_ = input, hidden
        r = tf.sigmoid(tf.matmul(x, weights[name+'_Wxr']) + tf.matmul(h_, weights[name+'_Whr']) + weights[name+'_br'])
        z = tf.sigmoid(tf.matmul(x, weights[name+'_Wxz']) + tf.matmul(h_, weights[name+'_Whz']) + weights[name+'_bz'])
        h_hat = tf.tanh(
            tf.matmul(x, weights[name+'_Wxh']) + tf.matmul(tf.mul(r, h_), weights[name+'_Whh']) + weights[name+'_bh'])
        # interpolate previous state and candidate by the update gate z
        output = tf.mul((1 - z), h_hat) + tf.mul(z, h_)
        return output
    def encode(self, input):
        """Encode (state, action, gru_state) into one feature vector.

        The state branch passes through a GRU cell (recurrent memory); the
        state and action embeddings are combined with ``self.merger``.
        Returns (encoding, new gru_state).
        """
        state = tf.cast(input[0], tf.float32)
        action = tf.cast(input[1], tf.float32)
        gru_state = tf.cast(input[2], tf.float32)
        # state branch: relu embedding -> GRU -> sigmoid embedding
        state_embedder1 = tf.nn.relu(tf.matmul(state, self.weights["encoder1_state_weights"]) + self.weights["encoder1_state_biases"])
        gru_state = self.gru_layer(state_embedder1, gru_state, self.weights, 'gru1')
        state_embedder2 = tf.sigmoid(tf.matmul(gru_state, self.weights["encoder2_state_weights"]) + self.weights["encoder2_state_biases"])
        # action branch: relu embedding -> sigmoid embedding
        action_embedder1 = tf.nn.relu(tf.matmul(action, self.weights["encoder1_action_weights"]) + self.weights["encoder1_action_biases"])
        action_embedder2 = tf.sigmoid(tf.matmul(action_embedder1, self.weights["encoder2_action_weights"]) + self.weights["encoder2_action_biases"])
        # merge both branches, then two more dense layers
        output = self.merger(state_embedder2, action_embedder2)
        hidden = tf.matmul(output, self.weights["encoder3_weights"]) + self.weights["encoder3_biases"]
        #bn = tf.nn.batch_normalization(hidden, mean=self.weights["bn1_mean"], variance=self.weights["bn1_variance"],
        #                               offset=self.weights["bn1_offset"], scale=self.weights["bn1_scale"], variance_epsilon=0.001)
        hidden_relu = tf.nn.relu(hidden)
        output = tf.nn.relu(tf.matmul(hidden_relu, self.weights["encoder4_weights"]) + self.weights["encoder4_biases"])
        gru_state = tf.cast(gru_state, tf.float32)
        return output, gru_state
    def decode(self, input):
        """Decode an encoding into the predicted next state (two dense
        layers with a relu in between)."""
        hidden = tf.matmul(input, self.weights["decoder1_weights"]) + self.weights["decoder1_biases"]
        hidden_relu = tf.nn.relu(hidden)
        # disabled multi-headed decoder experiment kept for reference:
        # multiheaded_hidden = tf.matmul(input, self.weights["multiheaded1_weights"]) + self.weights["multiheaded1_biases"]
        # multiheaded_hidden = tf.reshape(multiheaded_hidden, [-1, self.arch_params['output_dim'], 1, self.arch_params['small_encoding_dim']])
        # multiheaded_hidden = tf.nn.relu(multiheaded_hidden)
        #
        # h = tf.scan(lambda a,x: tf.batch_matmul(x, self.weights["multiheaded2_weights"]), multiheaded_hidden,
        #             initializer=tf.Variable(tf.constant(0.0, shape=[self.arch_params['output_dim'],1,1])))
        # multiheaded_output = h + self.weights["multiheaded2_biases"]
        # output1 = tf.reshape(multiheaded_output, [-1, self.arch_params['output_dim']])
        output1 = tf.matmul(hidden_relu, self.weights["decoder2_weights"]) + self.weights["decoder2_biases"]
        output = output1
        return output
def forward(self, input):
    """Run the encoder then the decoder.

    Returns (prediction, sparsity_loss, gru_state).  Sparsity
    regularisation is currently disabled; an empty list is returned in
    its place to keep the signature stable for callers.
    """
    encoding, gru_state = self.encode(input)
    prediction = self.decode(encoding)
    #sparsity_loss = tf.reduce_sum(self.kl_divergence(self.sparsity_params['rho'], encoding))
    sparsity_loss = []
    return prediction, sparsity_loss, gru_state
def calculate_loss(self, output, target, sparsity_loss):
    """Return (l2_loss, total_loss) for a batch of predictions."""
    target_f32 = tf.cast(target, dtype=tf.float32)
    # Scale the error back into un-normalised state units before the L2.
    scaled_error = self.states_normalizer * (output - target_f32)
    l2_loss = tf.nn.l2_loss(scaled_error) / float(self.training_params['batch_size'])
    total_loss = l2_loss + self.sparsity_params['beta'] * sparsity_loss
    return l2_loss, total_loss
def get_model(self):
    """Build the placeholders and wire up the forward pass and losses.

    Returns (input_placeholders, target_placeholder, output, loss,
    l2_loss, new_gru_state), where input_placeholders is the list
    [state, action, gru_state].
    """
    input_state = tf.placeholder(tf.float32, shape=[None, self.state_size], name='input_state')
    input_action = tf.placeholder(tf.float32, shape=[None, self.action_size], name='input_action')
    gru_state = tf.placeholder(tf.float32, shape=[None, self.arch_params['encoding_dim']], name='gru_state')
    model_input = [input_state, input_action, gru_state]
    output, sparsity_loss, new_gru_state = self.forward(model_input)
    target = tf.placeholder(tf.float32, shape=[None, self.arch_params['output_dim']], name='target')
    l2_loss, loss = self.calculate_loss(output, target, sparsity_loss)
    return model_input, target, output, loss, l2_loss, new_gru_state
def backward(self, loss):
    """Create the Adam training op for `loss`.

    Returns (apply_grads, mean_abs_grad, mean_abs_w) - the latter two are
    diagnostics computed by common.compute_mean_abs_norm.
    """
    optimizer = tf.train.AdamOptimizer(learning_rate=self.training_params['lr'])
    # Gradients with respect to every model weight.
    grads_and_vars = optimizer.compute_gradients(loss=loss, var_list=self.weights.values())
    mean_abs_grad, mean_abs_w = common.compute_mean_abs_norm(grads_and_vars)
    apply_grads = optimizer.apply_gradients(grads_and_vars)
    return apply_grads, mean_abs_grad, mean_abs_w
def train(self, objective):
    """Attach a minimisation op and a scalar summary for `objective`."""
    self.loss = objective
    self.minimize, self.mean_abs_grad, self.mean_abs_w = self.backward(objective)
    self.loss_summary = tf.scalar_summary('loss_t', objective)
def pretrain(self, opt, lr, batch_size, num_iterations, expert_er_path):
    """Pre-train the transition model on expert experience-replay data.

    opt: optimizer class, instantiated as opt(learning_rate=lr).
    lr: learning rate.
    batch_size: trajectories fed per session step.
    num_iterations: number of outer training iterations.
    expert_er_path: path to the expert experience-replay file.
    """
    er_expert = common.load_er(
        fname=expert_er_path,
        batch_size=batch_size,
        history_length=100,
        traj_length=2)
    # Min-max normalisation; (near-)constant dimensions get a divisor of 1
    # so we never divide by ~0.
    self.states_normalizer = er_expert.states_max - er_expert.states_min
    self.actions_normalizer = er_expert.actions_max - er_expert.actions_min
    #self.states_normalizer = er_expert.states_std
    #self.actions_normalizer = er_expert.actions_std
    self.states_normalizer[self.states_normalizer < 0.0001] = 1
    self.actions_normalizer[self.actions_normalizer < 0.0001] = 1
    self.states_min=er_expert.states_min
    # get placeholders
    input_ph, target_ph, output, loss, l2_loss, new_gru_state = self.get_model()
    train_op = opt(learning_rate=lr).minimize(l2_loss)
    train_losses = []
    last_train_losses = []
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        for i in range(num_iterations):
            fetches = [train_op, output, l2_loss, loss, new_gru_state]
            # get a trajectory from the train / test set and preprocess it
            trajectory = er_expert.sample_trajectory(self.num_steps + 1)
            trajectory_states = (trajectory[0] - er_expert.states_min) / self.states_normalizer
            trajectory_actions = (trajectory[1] - er_expert.actions_min) / self.actions_normalizer
            # set inputs and targets
            # NOTE(review): initial recurrent state is all-ones with batch
            # dimension 1 - presumably broadcast against batch_size; confirm.
            s = np.ones((1, self.arch_params['encoding_dim']))
            #o = trajectory_states[:batch_size, 0, :]
            target = []
            for step in range(self.num_steps):
                # NOTE(review): 'input' shadows the builtin; kept as-is.
                input = [trajectory_states[:batch_size, step, :], trajectory_actions[:batch_size, step, :]]
                # Target is the NEXT state in the trajectory.
                target = np.squeeze(trajectory_states[:batch_size, step+1, :])
                # Carry the GRU state s forward across steps of a trajectory.
                _, o, l2, l, s = sess.run(fetches,
                        feed_dict={input_ph[0]: input[0], input_ph[1]: input[1], input_ph[2]: s, target_ph: target})
            if i % 50 == 0:
                print("iteration " + str(i) + " l2 loss = " + str(l2))
                print((self.states_normalizer*o[0])[:30])
                print((self.states_normalizer*target[0])[:30])
                print("***********************")
            # Smoothed loss: running mean over the last 50 iterations.
            last_train_losses += [l2]
            if len(last_train_losses) > 50:
                del last_train_losses[0]
            train_losses += [sum(last_train_losses) / float(len(last_train_losses))]
| |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
# NOTE(review): 'global' at module scope is a no-op; module-level
# assignments re-bind 'flag' regardless. Kept for fidelity.
global flag
def new_classes():
    """Create and return two fresh empty classes.

    C1 is declared without an explicit base (a classic class on
    IronPython / Python 2); C2 explicitly derives from object.  Fresh
    classes are returned on every call so tests can mutate them freely.
    """
    class C1:
        pass

    class C2(object):
        pass

    return C1, C2
# used as handler for __getattr__ and __getattribute__
def return_100(self, name):
    """Attribute hook that resolves every lookup to the constant 100."""
    return 100

def throw_attribute_error(self, name):
    """Attribute hook that behaves as if the attribute does not exist."""
    raise AttributeError

def throw_assertion_error(self, name):
    """Attribute hook that fails loudly, to test error propagation."""
    raise AssertionError
def test_getattr_alone():
    """__getattr__ fallback on both class flavours.

    __getattr__ is only consulted after normal lookup fails, so the
    instance attribute x and class attribute y are never affected by it.
    """
    C1, C2 = new_classes()
    def __init__(self): self.x = 10
    # NOTE(review): plus_100 below is never installed or called in this
    # test - it appears to be vestigial.
    def plus_100(self, name):
        if self.__class__ == C2:
            pass
    for C in [C1, C2]:
        C.__init__ = __init__
        C.y = 20
        c = C()
        # z is missing, so __getattr__ supplies 100; x, y resolve normally.
        C.__getattr__ = return_100
        AreEqual([c.x, c.y, c.z], [10, 20, 100])
        def access_z(): return c.z
        # AttributeError from __getattr__ surfaces as a missing attribute.
        C.__getattr__ = throw_attribute_error
        AreEqual([c.x, c.y], [10, 20])
        AssertError(AttributeError, access_z)
        # Any other exception propagates unchanged.
        C.__getattr__ = throw_assertion_error
        AssertError(AssertionError, access_z)
        # __getattr__ may itself read attributes on the instance.
        C.__getattr__ = lambda self, name: self.x + 200 # access attribute inside
        AreEqual([c.x, c.y, c.z], [10, 20, 210])
        # Removing the hook restores the default failure behaviour.
        del C.__getattr__
        AssertError(AttributeError, access_z)
def test_setattr_alone():
    """__setattr__ intercepts every instance attribute assignment.

    While installed it fully replaces the default store, so values only
    reach the instance __dict__ if the hook writes them there itself.
    """
    global flag
    C1, C2 = new_classes()
    def f(self): self.x = 10
    def simply_record(self, name, value): global flag; flag = "%s %s" % (name, value)
    def simply_throw(self, name, value): raise AssertionError
    def add_10_via_dict(self, name, value):
        self.__dict__[name] = value + 10
    def add_20_via_object(self, name, value):
        # object.__setattr__ only applies to new-style instances (C2);
        # old-style C1 writes through __dict__ directly.
        if self.__class__ == C2:
            object.__setattr__(self, name, value + 20)
        if self.__class__ == C1:
            self.__dict__[name] = value + 20
    for C in [C1, C2]:
        C.set_something = f
        c = C()
        c.x = 0
        C.__setattr__ = simply_record
        flag = 0
        c.set_something()
        AreEqual(flag, "x 10")
        AreEqual(c.x, 0) # unchanged
        c.y = 20
        AreEqual(flag, "y 20")
        # The hook only recorded the assignment, so y was never stored.
        AssertError(AttributeError, lambda: c.y)
        C.__setattr__ = simply_throw
        AssertError(AssertionError, c.set_something) # even if c.x already exists
        # Class attribute assignment bypasses the instance __setattr__.
        C.z = 30 # ok: class variable
        AreEqual(c.z, 30)
        C.__setattr__ = add_10_via_dict
        c.set_something()
        AreEqual(c.x, 20)
        C.__setattr__ = add_20_via_object
        c.u = 50
        AreEqual(c.u, 70)
        # Removing the hook restores direct storage; the class variable
        # z is shadowed by the new instance attribute.
        del C.__setattr__
        c.z = 40
        AreEqual([c.z, C.z], [40, 30])
def test_delattr_only():
    """Placeholder: __delattr__-specific behaviour is not exercised yet."""
    C1, C2 = new_classes()
# low pri
@disabled("bug 365168")
def test_negative1():
    """Disabled: object.__setattr__ / object.__getattribute__ applied to
    old-style instances should raise TypeError / AttributeError."""
    class C:
        def __setattr__(self, name, value):
            object.__setattr__(self, name, value)
    try: C().x = 1
    except TypeError: pass
    else: Fail("should have thrown: can't apply this __setattr__ to instance object")
    class C:
        def __getattr__(self, name):
            object.__getattribute__(self, name)
    AssertErrorWithMessage(AttributeError, "'instance' object has no attribute 'x'", lambda: C().x)
def test_bad_signatures():
    """Hooks installed with the wrong arity raise TypeError when invoked."""
    C1, C2 = new_classes()
    def bad1(self): pass
    def bad2(self, x): pass
    def bad3(self, x, y): pass
    def bad4(self, x, y, z): pass
    for C in [C1, C2]:
        c = C()
        def f(): c.x = 1
        # __getattr__ takes (self, name): one or three args is wrong.
        for bad_for_get in [bad1, bad3]:
            C.__getattr__ = bad_for_get
            AssertError(TypeError, lambda: c.x)
        # __setattr__ takes (self, name, value).
        for bad_for_set in [bad2, bad4]:
            C.__setattr__ = bad_for_set
            AssertError(TypeError, f)
    # Runs after the loop, so c is the C2 instance here (only new-style
    # classes honour __getattribute__).
    for bad_for_getattribute in [bad1, bad3]:
        C2.__getattribute__ = bad_for_getattribute
        AssertError(TypeError, lambda: c.x)
def test_getattribute_only():
    """__getattribute__ replaces ALL attribute access - new-style only."""
    class C:
        def __getattribute__(self, name):
            return 10
    c = C()
    AssertError(AttributeError, lambda: c.x) # __getattribute__ only works for new-style
    class C(object):
        def set_y(self): self.y = 30
    c = C()
    # Bound method captured before any hook is installed.
    f = c.set_y
    c.x = 10
    # Every lookup now yields 100, even though x is really 10.
    C.__getattribute__ = return_100
    AreEqual(100, c.x)
    c.x = 20
    def plus_100(self, name):
        try:
            return object.__getattribute__(self, name) + 100
        except AttributeError:
            return 200
    C.__getattribute__ = plus_100
    AreEqual(120, c.x)
    # The pre-captured bound method still stores y normally.
    f()
    AreEqual(130, c.y)
    # A missing attribute hits the except branch above.
    AreEqual(200, c.z)
    C.__getattribute__ = throw_attribute_error
    AssertError(AttributeError, lambda: c.x)
    C.__getattribute__ = throw_assertion_error
    AssertError(AssertionError, lambda: c.x)
    # Deleting the hook restores default lookup.
    del C.__getattribute__
    AreEqual(c.x, 20)
    AreEqual(c.y, 30)
    AssertError(AttributeError, lambda: c.z)
def test_getattr_and_getattribute_together():
    """With both hooks present __getattribute__ wins; __getattr__ is the
    fallback only when __getattribute__ raises AttributeError."""
    class C(object): pass
    c = C()
    C.__getattr__ = lambda *args: 20
    C.__getattribute__ = lambda *args: 30
    AreEqual(c.x, 30)
    # AttributeError from __getattribute__ falls through to __getattr__ ...
    C.__getattribute__ = throw_attribute_error
    AreEqual(c.x, 20)
    # ... but any other exception propagates.
    C.__getattribute__ = throw_assertion_error
    AssertError(AssertionError, lambda: c.x)
    # __getattribute__ may also delegate to __getattr__ explicitly.
    C.__getattribute__ = lambda *args: C.__getattr__(*args)
    AreEqual(c.x, 20)
def test_subclassing():
    """Hooks installed on a base class affect existing and new subclasses."""
    C1, C2 = new_classes()
    ## new style
    class D(C2): pass
    d = D()
    d.x = 10
    # A hook added to the base AFTER D was created is still picked up.
    C2.__getattr__ = return_100
    AreEqual(d.y, 100)
    del C2.__getattr__
    def f(self, name, value): self.__dict__[name] = value + 10
    C2.__setattr__ = f
    d.x = 20
    AreEqual(d.x, 30)
    del C2.__setattr__
    C2.__getattribute__ = return_100
    #AreEqual(d.x, 100) # bug 365242
    ## old style
    class D(C1): pass
    d = D()
    C1.__getattr__ = return_100
    #AssertError(AttributeError, lambda: d.y) # (no?) dynamism for old style, bug 365266
    # Old-style subclasses must be re-created to see the inherited hooks.
    class D(C1): pass
    d = D()
    d.x = 10
    AreEqual([d.x, d.y], [10, 100])
    C1.__setattr__ = f
    class D(C1): pass
    d = D()
    d.x = 20
    AreEqual([d.x, d.y], [30, 100])
    C1.__getattribute__ = lambda *args: 200 # __getattribute__ not honored
    class D(C1): pass
    d = D()
    # x was never stored (no __init__), so both lookups fall to __getattr__.
    AreEqual([d.x, d.y], [100, 100])
@disabled("bug 369042")
def test_delete_getattribute():
    """Disabled: deleting an inherited __getattribute__ from a subclass
    that never defined one should raise AttributeError."""
    class B(object):
        def __getattribute__(self, name): pass
    class D(B): pass
    def f(): del D.__getattribute__
    AssertError(AttributeError, f)
# Discover and run every test_* function in this module via the iptest harness.
run_test(__name__)
| |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import gc
import imath
import IECore
import Gaffer
import GafferTest
class ArrayPlugTest( GafferTest.TestCase ) :

	"""Tests for Gaffer.ArrayPlug : automatic resizing as connections are
	made and broken, serialisation, size limits and metadata propagation.

	Review fixes : several assertions used `assertTrue( a, b )`, which
	treats `b` as the failure *message* and never compares the values -
	those are now real comparisons.  testFixedLengthDynamic asserted
	element [1] twice where [2] was intended."""

	def test( self ) :

		"""Connecting the last element adds a spare; disconnecting trims it."""

		a = GafferTest.AddNode()
		n = GafferTest.ArrayPlugNode()

		self.assertTrue( "e1" in n["in"] )
		self.assertTrue( "e2" not in n["in"] )
		self.assertEqual( len( n["in"] ), 1 )
		self.assertTrue( n["in"]["e1"].isSame( n["in"][0] ) )

		n["in"][0].setInput( a["sum"] )
		self.assertEqual( len( n["in"] ), 2 )
		self.assertTrue( "e1" in n["in"] )
		self.assertTrue( "e2" in n["in"] )

		n["in"][0].setInput( None )
		self.assertTrue( "e1" in n["in"] )
		self.assertTrue( "e2" not in n["in"] )
		self.assertEqual( len( n["in"] ), 1 )

	def testConnectionGaps( self ) :

		"""Disconnecting a middle element leaves a gap rather than compacting."""

		a = GafferTest.AddNode()
		n = GafferTest.ArrayPlugNode()

		n["in"][0].setInput( a["sum"] )
		n["in"][1].setInput( a["sum"] )
		n["in"][2].setInput( a["sum"] )

		self.assertEqual( len( n["in"] ), 4 )
		# Fixed : these previously used assertTrue( x, y ), which never compared.
		self.assertTrue( n["in"]["e1"].getInput().isSame( a["sum"] ) )
		self.assertTrue( n["in"]["e2"].getInput().isSame( a["sum"] ) )
		self.assertTrue( n["in"]["e3"].getInput().isSame( a["sum"] ) )
		self.assertTrue( n["in"]["e4"].getInput() is None )

		n["in"][1].setInput( None )

		self.assertEqual( len( n["in"] ), 4 )
		self.assertTrue( n["in"]["e1"].getInput().isSame( a["sum"] ) )
		self.assertTrue( n["in"]["e2"].getInput() is None )
		self.assertTrue( n["in"]["e3"].getInput().isSame( a["sum"] ) )
		self.assertTrue( n["in"]["e4"].getInput() is None )

	def testSerialisation( self ) :

		"""Connection gaps survive a serialise/execute round trip."""

		s = Gaffer.ScriptNode()
		s["a"] = GafferTest.AddNode()
		s["n"] = GafferTest.ArrayPlugNode()

		s["n"]["in"][0].setInput( s["a"]["sum"] )
		s["n"]["in"][1].setInput( s["a"]["sum"] )
		s["n"]["in"][2].setInput( s["a"]["sum"] )
		s["n"]["in"][1].setInput( None )

		self.assertEqual( len( s["n"]["in"] ), 4 )
		self.assertTrue( s["n"]["in"]["e1"].isSame( s["n"]["in"][0] ) )
		self.assertTrue( s["n"]["in"]["e2"].isSame( s["n"]["in"][1] ) )
		self.assertTrue( s["n"]["in"]["e3"].isSame( s["n"]["in"][2] ) )
		self.assertTrue( s["n"]["in"]["e4"].isSame( s["n"]["in"][3] ) )
		# Fixed : real comparisons instead of assertTrue( x, msg ).
		self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"]["e2"].getInput() is None )
		self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"]["e4"].getInput() is None )

		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		self.assertEqual( len( s2["n"]["in"] ), 4 )
		self.assertTrue( s2["n"]["in"]["e1"].isSame( s2["n"]["in"][0] ) )
		self.assertTrue( s2["n"]["in"]["e2"].isSame( s2["n"]["in"][1] ) )
		self.assertTrue( s2["n"]["in"]["e3"].isSame( s2["n"]["in"][2] ) )
		self.assertTrue( s2["n"]["in"]["e4"].isSame( s2["n"]["in"][3] ) )
		self.assertTrue( s2["n"]["in"]["e1"].getInput().isSame( s2["a"]["sum"] ) )
		self.assertTrue( s2["n"]["in"]["e2"].getInput() is None )
		self.assertTrue( s2["n"]["in"]["e3"].getInput().isSame( s2["a"]["sum"] ) )
		self.assertTrue( s2["n"]["in"]["e4"].getInput() is None )

	def testMaximumInputs( self ) :

		"""A full plug does not grow past its maximum size."""

		a = GafferTest.AddNode()
		n = GafferTest.ArrayPlugNode()

		# connect all inputs
		for i in range( 0, 6 ) :
			n["in"][i].setInput( a["sum"] )

		self.assertEqual( len( n["in"] ), 6 )
		for i in range( 0, 6 ) :
			self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )

		# check that removing the one before the last
		# leaves the last in place.
		n["in"][4].setInput( None )
		self.assertEqual( len( n["in"] ), 6 )
		for i in range( 0, 6 ) :
			if i != 4 :
				self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
			else :
				self.assertTrue( n["in"][i].getInput() is None )

	def testMakeConnectionAndUndoAndRedo( self ) :

		"""Resizing caused by a connection participates in undo/redo."""

		s = Gaffer.ScriptNode()
		s["a"] = GafferTest.AddNode()
		s["n"] = GafferTest.ArrayPlugNode()

		with Gaffer.UndoScope( s ) :
			s["n"]["in"][0].setInput( s["a"]["sum"] )

		self.assertEqual( len( s["n"]["in"] ), 2 )
		self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
		self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )

		s.undo()
		self.assertEqual( len( s["n"]["in"] ), 1 )
		self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )

		s.redo()
		self.assertEqual( len( s["n"]["in"] ), 2 )
		self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
		self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )

		s.undo()
		self.assertEqual( len( s["n"]["in"] ), 1 )
		self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
		self.assertTrue( "in" in s["n"] )
		self.assertFalse( "in1" in s["n"] )

	def testMinimumInputs( self ) :

		"""The plug never shrinks below minSize, and keeps one spare element."""

		a = GafferTest.AddNode()
		n = Gaffer.Node()
		n["in"] = Gaffer.ArrayPlug( "in", element = Gaffer.IntPlug( "e1" ), minSize=3 )

		self.assertEqual( len( n["in"] ), 3 )

		# connecting to the middle input shouldn't create
		# any new inputs, because there is still one free on the end
		n["in"]["e2"].setInput( a["sum"] )
		self.assertEqual( len( n["in"] ), 3 )

		# connecting to the last input should create a new
		# one - there should always be one free input on the
		# end (until the maximum is reached).
		n["in"]["e3"].setInput( a["sum"] )
		self.assertEqual( len( n["in"] ), 4 )

		n["in"]["e3"].setInput( None )
		self.assertEqual( len( n["in"] ), 3 )

	def testDeleteAndUndoAndRedo( self ) :

		"""Deleting the node and undoing restores all connections."""

		s = Gaffer.ScriptNode()
		s["a"] = GafferTest.AddNode()
		s["n"] = GafferTest.ArrayPlugNode()

		s["n"]["in"]["e1"].setInput( s["a"]["sum"] )
		s["n"]["in"]["e2"].setInput( s["a"]["sum"] )
		s["n"]["in"]["e3"].setInput( s["a"]["sum"] )

		self.assertEqual( len( s["n"]["in"] ), 4 )
		self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )

		with Gaffer.UndoScope( s ) :
			s.deleteNodes( s, Gaffer.StandardSet( [ s["n"] ] ) )

		self.assertFalse( "n" in s )

		s.undo()

		self.assertEqual( len( s["n"]["in"] ), 4 )
		self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )

		s.redo()

		self.assertFalse( "n" in s )

		s.undo()

		self.assertEqual( len( s["n"]["in"] ), 4 )
		self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )

	def testDeleteInputNodeAndUndoAndRedo( self ) :

		"""Deleting the INPUT node and undoing restores all connections."""

		s = Gaffer.ScriptNode()
		s["a"] = GafferTest.AddNode()
		s["n"] = GafferTest.ArrayPlugNode()

		s["n"]["in"][0].setInput( s["a"]["sum"] )
		s["n"]["in"][1].setInput( s["a"]["sum"] )
		s["n"]["in"][2].setInput( s["a"]["sum"] )

		n = s["n"]

		self.assertEqual( len( s["n"]["in"] ), 4 )
		self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )

		with Gaffer.UndoScope( s ) :
			s.deleteNodes( s, Gaffer.StandardSet( [ s["a"] ] ) )

		self.assertFalse( "a" in s )

		s.undo()

		self.assertEqual( len( s["n"]["in"] ), 4 )
		self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )

		s.redo()

		self.assertFalse( "a" in s )

		s.undo()

		self.assertEqual( len( s["n"]["in"] ), 4 )
		self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )

	def testFixedLengthDynamic( self ) :

		"""minSize == maxSize gives a fixed-length plug that serialises intact."""

		s = Gaffer.ScriptNode()
		s["a"] = GafferTest.AddNode()
		s["n"] = Gaffer.Node()
		s["n"]["a"] = Gaffer.ArrayPlug( "a", element = Gaffer.IntPlug(), minSize = 4, maxSize = 4, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		s["n"]["a"][1].setInput( s["a"]["sum"] )
		s["n"]["a"][2].setInput( s["a"]["sum"] )

		self.assertEqual( s["n"]["a"].minSize(), 4 )
		self.assertEqual( s["n"]["a"].maxSize(), 4 )
		self.assertEqual( len( s["n"]["a"] ), 4 )
		self.assertTrue( s["n"]["a"][0].getInput() is None )
		self.assertTrue( s["n"]["a"][1].getInput().isSame( s["a"]["sum"] ) )
		# Fixed : this previously re-checked element [1] instead of [2].
		self.assertTrue( s["n"]["a"][2].getInput().isSame( s["a"]["sum"] ) )
		self.assertTrue( s["n"]["a"][3].getInput() is None )

		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		self.assertEqual( s2["n"]["a"].minSize(), 4 )
		self.assertEqual( s2["n"]["a"].maxSize(), 4 )
		self.assertEqual( len( s2["n"]["a"] ), 4 )
		self.assertTrue( s2["n"]["a"][0].getInput() is None )
		self.assertTrue( s2["n"]["a"][1].getInput().isSame( s2["a"]["sum"] ) )
		# Fixed : same duplicated index as above.
		self.assertTrue( s2["n"]["a"][2].getInput().isSame( s2["a"]["sum"] ) )
		self.assertTrue( s2["n"]["a"][3].getInput() is None )

	def testPythonElement( self ) :

		"""ArrayPlug works with element types defined in Python."""

		class PythonElement( Gaffer.Plug ) :

			def __init__( self, name = "PythonElement", direction = Gaffer.Plug.Direction.In, flags = Gaffer.Plug.Flags.Default ) :

				Gaffer.Plug.__init__( self, name, direction, flags )

			def createCounterpart( self, name, direction ) :

				return PythonElement( name, direction, self.getFlags() )

		n = Gaffer.Node()
		n["a"] = Gaffer.ArrayPlug( element = PythonElement() )

		self.assertEqual( len( n["a"] ), 1 )
		self.assertTrue( isinstance( n["a"][0], PythonElement ) )

		p = PythonElement()
		n["a"][0].setInput( p )

		self.assertEqual( len( n["a"] ), 2 )
		self.assertTrue( isinstance( n["a"][1], PythonElement ) )

	def testTopLevelConnection( self ) :

		"""A whole-array connection tracks the source array as it resizes."""

		n = Gaffer.Node()
		n["a"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
		n["b"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
		n["b"].setInput( n["a"] )

		def assertInput( plug, input ) :

			self.assertEqual( len( plug ), len( input ) )
			for i in range( 0, len( plug ) ) :
				self.assertTrue( plug[i].getInput().isSame( input[i] ) )

		assertInput( n["b"], n["a"] )

		a = GafferTest.AddNode()

		n["a"][0].setInput( a["sum"] )
		self.assertEqual( len( n["a"] ), 2 )
		assertInput( n["b"], n["a"] )

		n["a"][1].setInput( a["sum"] )
		self.assertEqual( len( n["a"] ), 3 )
		assertInput( n["b"], n["a"] )

		n["a"][0].setInput( None )
		self.assertEqual( len( n["a"] ), 3 )
		assertInput( n["b"], n["a"] )

	def testArrayPlugCopiesColors( self ) :

		"""New elements inherit the colour metadata of the prototype element."""

		n = Gaffer.Node()
		n2 = Gaffer.Node()
		n2.addChild(Gaffer.IntPlug("test"))

		connectionColor = imath.Color3f( 0.1 , 0.2 , 0.3 )
		noodleColor = imath.Color3f( 0.4, 0.5 , 0.6 )

		element = Gaffer.IntPlug()
		Gaffer.Metadata.registerValue( element, "connectionGadget:color", connectionColor )
		Gaffer.Metadata.registerValue( element, "nodule:color", noodleColor )

		n["a"] = Gaffer.ArrayPlug( element = element )
		n["a"][0].setInput(n2["test"])

		self.assertEqual( Gaffer.Metadata.value( n["a"][1], "connectionGadget:color" ), connectionColor )
		self.assertEqual( Gaffer.Metadata.value( n["a"][1], "nodule:color" ), noodleColor )

	def testOnlyOneChildType( self ) :

		"""Only children of the prototype's type are accepted."""

		p = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
		self.assertTrue( p.acceptsChild( Gaffer.IntPlug() ) )
		self.assertFalse( p.acceptsChild( Gaffer.FloatPlug() ) )

	def testDenyInputFromNonArrayPlugs( self ) :

		"""A whole-array input must itself be an ArrayPlug."""

		a = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
		p = Gaffer.V2iPlug()
		self.assertFalse( a.acceptsInput( p ) )

	def testPartialConnections( self ) :

		"""Connections to components of compound elements also drive resizing."""

		n = Gaffer.Node()
		n["p"] = Gaffer.ArrayPlug( element = Gaffer.V3fPlug( "e" ) )
		self.assertEqual( len( n["p"] ), 1 )

		p = Gaffer.FloatPlug()
		n["p"][0]["x"].setInput( p )
		self.assertEqual( len( n["p"] ), 2 )
		n["p"][0]["y"].setInput( p )
		self.assertEqual( len( n["p"] ), 2 )
		n["p"][1]["y"].setInput( p )
		self.assertEqual( len( n["p"] ), 3 )
		n["p"][2]["z"].setInput( p )
		self.assertEqual( len( n["p"] ), 4 )
		n["p"][1]["y"].setInput( None )
		self.assertEqual( len( n["p"] ), 4 )
		n["p"][2]["z"].setInput( None )
		self.assertEqual( len( n["p"] ), 2 )

	def testResizeWhenInputsChange( self ) :

		"""resizeWhenInputsChange = False disables the automatic resizing."""

		s = Gaffer.ScriptNode()
		s["a"] = GafferTest.AddNode()
		s["n"] = Gaffer.Node()
		s["n"]["user"]["p"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug(), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, resizeWhenInputsChange = False )
		self.assertEqual( s["n"]["user"]["p"].resizeWhenInputsChange(), False )

		self.assertEqual( len( s["n"]["user"]["p"] ), 1 )
		s["n"]["user"]["p"][0].setInput( s["a"]["sum"] )
		self.assertEqual( len( s["n"]["user"]["p"] ), 1 )
		s["n"]["user"]["p"][0].setInput( None )
		self.assertEqual( len( s["n"]["user"]["p"] ), 1 )

		# The flag is preserved by createCounterpart().
		p = s["n"]["user"]["p"].createCounterpart( "p", Gaffer.Plug.Direction.In )
		self.assertEqual( p.resizeWhenInputsChange(), False )

	def testNext( self ) :

		"""next() returns the first unconnected element, growing if permitted."""

		a = GafferTest.AddNode()
		n = Gaffer.Node()
		n["a1"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
		n["a2"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug(), maxSize = 3, resizeWhenInputsChange = False )
		self.assertEqual( len( n["a1"] ), 1 )
		self.assertEqual( len( n["a2"] ), 1 )
		self.assertEqual( n["a1"].next(), n["a1"][0] )
		self.assertEqual( n["a2"].next(), n["a2"][0] )

		n["a1"][0].setInput( a["sum"] )
		n["a2"][0].setInput( a["sum"] )
		self.assertEqual( len( n["a1"] ), 2 )
		self.assertEqual( len( n["a2"] ), 1 )
		self.assertEqual( n["a1"].next(), n["a1"][1] )
		self.assertEqual( n["a2"].next(), n["a2"][1] )
		self.assertEqual( len( n["a2"] ), 2 )
		self.assertEqual( n["a1"].next(), n["a1"][1] )
		self.assertEqual( n["a2"].next(), n["a2"][1] )

		n["a2"].next().setInput( a["sum"] )
		n["a2"].next().setInput( a["sum"] )
		self.assertEqual( len( n["a2"] ), 3 )
		# All three elements connected and maxSize reached : no next element.
		self.assertEqual( n["a2"].next(), None )

	def testResize( self ) :

		"""resize() honours the min/max bounds and creates typed elements."""

		p = Gaffer.ArrayPlug( element = Gaffer.IntPlug(), minSize = 1, maxSize = 3, resizeWhenInputsChange = False )
		self.assertEqual( len( p ), p.minSize() )

		p.resize( 2 )
		self.assertEqual( len( p ), 2 )
		self.assertIsInstance( p[1], Gaffer.IntPlug )

		p.resize( 3 )
		self.assertEqual( len( p ), 3 )
		self.assertIsInstance( p[2], Gaffer.IntPlug )

		with self.assertRaises( RuntimeError ) :
			p.resize( p.minSize() - 1 )

		with self.assertRaises( RuntimeError ) :
			p.resize( p.maxSize() + 1 )

	def testSerialisationUsesIndices( self ) :

		"""Serialisation addresses elements by index, not by name."""

		s = Gaffer.ScriptNode()
		s["a"] = GafferTest.AddNode()
		s["n"] = GafferTest.ArrayPlugNode()
		s["n"]["in"][0].setInput( s["a"]["sum"] )
		s["n"]["in"][1].setInput( s["a"]["sum"] )

		ss = s.serialise()
		self.assertNotIn( "[\"" + s["n"]["in"][0].getName() + "\"]", ss )
		self.assertNotIn( "[\"" + s["n"]["in"][1].getName() + "\"]", ss )
		self.assertIn( "[0].setInput", ss )
		self.assertIn( "[1].setInput", ss )

		s2 = Gaffer.ScriptNode()
		s2.execute( ss )
		self.assertEqual( s2["n"]["in"][0].getInput(), s2["a"]["sum"] )
		self.assertEqual( s2["n"]["in"][1].getInput(), s2["a"]["sum"] )

	def tearDown( self ) :

		# some bugs in the InputGenerator only showed themselves when
		# the ScriptNode was deleted during garbage collection, often
		# in totally unrelated tests. so we run the garbage collector
		# here to localise any problems to this test, making them
		# easier to diagnose and fix.

		while gc.collect() :
			pass
		IECore.RefCounted.collectGarbage()
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| |
import logging
import os, sys, time
from ctypes import *
import numpy as np
import scipy.sparse as spp
from numpy.ctypeslib import ndpointer
from .backend import Backend
log = logging.getLogger(__name__)

# Find MKL library: macOS uses .dylib, everything else is assumed to use .so.
# NOTE(review): this raises OSError at import time if libmkl_rt is not on the
# loader path - confirm that is the intended failure mode.
dll_ext = '.dylib' if sys.platform == 'darwin' else '.so'
libmkl_rt = cdll.LoadLibrary('libmkl_rt' + dll_ext)
class MklBackend(Backend):
def __init__(self, device_id=0):
    """Initialise the MKL backend.

    device_id is accepted for interface compatibility with other
    backends but is unused here (MKL runs on the host CPU).
    """
    super(MklBackend, self).__init__()
    log.debug('mkl_get_version() reports: %s', self.get_version())
    # Cache of committed DFTI descriptors keyed by (shape, dtype);
    # see _get_or_create_fft_desc().
    self._fft_descs = dict()
def wrap(fn):
    """Bind an annotated Python stub to the matching symbol in libmkl_rt.

    The stub's parameter annotations become the foreign function's
    argtypes and its return annotation becomes its restype.  The returned
    wrapper converts a non-zero DFTI status code into a RuntimeError with
    MKL's own error message.
    """
    libfn = getattr(libmkl_rt, fn.__name__)
    # Fix: use only the declared parameters. co_varnames also lists local
    # variables, which would corrupt argtypes if a stub ever grew a body.
    code = fn.__code__
    libfn.argtypes = [fn.__annotations__[arg]
                      for arg in code.co_varnames[:code.co_argcount]]
    libfn.restype = fn.__annotations__['return']
    def wrapped(self, *args, **kwargs):
        res = libfn(*args, **kwargs)
        # status_t is a c_long subclass, so a non-converted c_long result
        # signals a DFTI status; non-zero means failure.
        if isinstance(res, c_long) and res.value != 0:
            s = self.DftiErrorMessage( res )
            raise RuntimeError( s.decode('ascii') )
        return res
    return wrapped
# -----------------------------------------------------------------------
# Arrays
# -----------------------------------------------------------------------
class dndarray(Backend.dndarray):
    # Host-memory array implementation: data is kept 64-byte aligned and
    # Fortran-ordered so it can be handed directly to MKL routines.
    _align = 64
    def _copy_from(self, arr):
        # Host -> backend array (both are host memory here).
        self._arr.flat[:] = arr.flat
    def _copy_to(self, arr):
        # Backend array -> host.
        arr.flat[:] = self._arr.flat
    def _copy(self, d_arr):
        # Element-wise copy between two dndarrays in column-major order.
        dst = self._arr.reshape(-1, order='F')
        src = d_arr._arr.reshape(-1, order='F')
        dst.flat[:] = src.flat
    def _malloc(self, shape, dtype):
        # Over-allocate, then advance the view one element at a time until
        # the data pointer is 64-byte aligned.
        # NOTE(review): the loop steps by one *element* (itemsize bytes),
        # so it assumes itemsize divides _align - confirm for all dtypes used.
        elems = np.prod(shape) + self._align
        self._arr_orig = arr = np.ndarray(elems, dtype)
        while arr.ctypes.get_data() % self._align != 0:
            arr = arr[1:]
        arr = arr[:np.prod(shape)]
        arr = np.asfortranarray(arr.reshape(shape))
        return arr
    def _free(self):
        # Drop the owning reference; numpy frees the buffer when unreferenced.
        del self._arr_orig
    def _zero(self):
        self._arr[:] = 0
    def __getitem__(self, slc):
        # Slices are views sharing memory with the parent (own=False).
        d = self._arr[slc]
        return self._backend.dndarray( self._backend, d.shape, d.dtype,
            ld=self._leading_dim, own=False, data=d )
    def to_host(self):
        return self._arr
    @staticmethod
    def from_param(obj):
        # ctypes conversion hook: lets a dndarray be passed directly where
        # an argtype of `dndarray` was declared (see the @wrap stubs).
        if not isinstance(obj, MklBackend.dndarray):
            raise ArgumentError('{} is not a dndarray'.format( type(obj) ))
        return obj._arr.ctypes.get_as_parameter()
    def as_nparray(self):
        return self._arr
@wrap
def mkl_get_max_threads() -> c_int:
    # Signature-only stub; @wrap binds it to libmkl_rt.mkl_get_max_threads.
    pass
def get_max_threads(self):
    """Return the number of threads MKL will use."""
    return self.mkl_get_max_threads()
@wrap
def mkl_get_version_string( buf : c_char_p, length : c_int ) -> c_void_p:
    # Signature-only stub; fills `buf` with MKL's version banner.
    pass
def get_version(self):
    """Return the MKL version banner as a str."""
    buf = create_string_buffer(128)
    self.mkl_get_version_string(buf, len(buf))
    return buf.value.decode('ascii')
# -----------------------------------------------------------------------
# BLAS Routines
# -----------------------------------------------------------------------
def axpby(self, beta, y, alpha, x):
    """y = alpha*x + beta*y, in place on y (complex64).

    NOTE(review): the previous docstring said "y += alpha * x", which
    ignored beta; cblas_caxpby scales y by beta as well.
    """
    assert isinstance(x, self.dndarray)
    assert isinstance(y, self.dndarray)
    alpha = np.array(alpha, dtype=np.complex64)
    beta = np.array( beta, dtype=np.complex64)
    self.cblas_caxpby( y.size, alpha, x._arr, 1, beta, y._arr, 1 )
def dot(self, x, y):
    """Return the real part of the conjugated dot product x^H * y.

    NOTE(review): uses cblas_cdotc_sub (conjugated); the previous
    docstring claimed plain x^T * y.
    """
    assert isinstance(x, self.dndarray)
    assert isinstance(y, self.dndarray)
    dotc = np.array(0, dtype=np.complex64)
    self.cblas_cdotc_sub( x.size, x._arr, 1, y._arr, 1, dotc )
    return dotc.real
def norm2(self, x):
    """Return the SQUARED Euclidean norm ||x||_2^2.

    cblas_scnrm2 yields ||x||_2; the result is squared before returning
    (the previous docstring said ||x||_2).
    """
    assert isinstance(x, self.dndarray)
    res = self.cblas_scnrm2( x.size, x._arr, 1 )
    return res**2
def scale(self, x, alpha):
    """x *= alpha, in place (complex64)."""
    assert isinstance(x, self.dndarray)
    a = np.array(alpha, dtype=np.complex64)
    self.cblas_cscal( x.size, a, x._arr, 1 )
def cgemm(self, y, M, x, alpha, beta, forward):
    """y = alpha * op(M) @ x + beta * y (complex64, column-major).

    forward=True uses M as-is; forward=False uses its conjugate
    transpose (the adjoint).
    """
    layout = MklBackend.CBlasLayout.ColMajor
    if forward:
        transa = MklBackend.CBlasTranspose.NoTrans
    else:
        transa = MklBackend.CBlasTranspose.ConjTrans
    transb = MklBackend.CBlasTranspose.NoTrans
    (m, n), k = y.shape, x.shape[0]
    alpha = np.array(alpha, dtype=np.complex64)
    beta = np.array( beta, dtype=np.complex64)
    # NOTE(review): leading dimensions come from .shape here, whereas
    # csymm uses _leading_dim - confirm both are correct for padded arrays.
    lda = M.shape[0]
    ldb = x.shape[0]
    ldc = y.shape[0]
    self.cblas_cgemm(
        layout, transa, transb, m, n, k,
        alpha, M, lda, x, ldb, beta, y, ldc
    )
# CBLAS enum values, mirroring mkl_cblas.h.
class CBlasLayout(c_uint):
    RowMajor = 101
    ColMajor = 102
class CBlasTranspose(c_uint):
    NoTrans = 111
    Trans = 112
    ConjTrans = 113
# Signature-only stub for complex single-precision GEMM; @wrap binds it
# to libmkl_rt and installs these annotations as argtypes/restype.
@wrap
def cblas_cgemm(
    layout : CBlasLayout,
    transa : CBlasTranspose,
    transb : CBlasTranspose,
    m : c_int,
    n : c_int,
    k : c_int,
    alpha : ndpointer(dtype=np.complex64, ndim=0),
    a : dndarray,
    lda : c_int,
    b : dndarray,
    ldb : c_int,
    beta : ndpointer(dtype=np.complex64, ndim=0),
    c : dndarray,
    ldc : c_int,
) -> c_void_p:
    pass
class CBlasSide(c_uint):
    Left = 141
    Right = 142
class CBlasUplo(c_uint):
    Upper = 121
    Lower = 122
# Signature-only stub for complex single-precision symmetric multiply.
@wrap
def cblas_csymm(
    layout: CBlasLayout,
    side : CBlasSide,
    uplo : CBlasUplo,
    m : c_int,
    n : c_int,
    alpha : ndpointer(dtype=np.complex64, ndim=0),
    a : dndarray,
    lda : c_int,
    b : dndarray,
    ldb : c_int,
    beta : ndpointer(dtype=np.complex64, ndim=0),
    c : dndarray,
    ldc : c_int,
) -> c_void_p:
    pass
def csymm(self, y, M, x, alpha, beta, left=True):
    """y = alpha * M @ x + beta * y for symmetric M (complex64).

    left=True computes M @ x; left=False computes x @ M.  Only the upper
    triangle of M is referenced.
    """
    layout = MklBackend.CBlasLayout.ColMajor
    side = MklBackend.CBlasSide.Left if left else MklBackend.CBlasSide.Right
    uplo = MklBackend.CBlasUplo.Upper
    # NOTE(review): k is unpacked but unused by cblas_csymm.
    (m, n), k = y.shape, x.shape[0]
    alpha = np.array(alpha, dtype=np.complex64)
    beta = np.array( beta, dtype=np.complex64)
    lda = M._leading_dim
    ldb = x._leading_dim
    ldc = y._leading_dim
    self.cblas_csymm(
        layout, side, uplo, m, n, alpha, M, lda,
        x, ldb, beta, y, ldc
    )
# Signature-only CBLAS level-1 stubs; @wrap binds each to libmkl_rt.
@wrap
def cblas_caxpby(
    n : c_int,
    a : ndpointer(dtype=np.complex64, ndim=0),
    x : ndpointer(dtype=np.complex64),
    incx : c_int,
    b : ndpointer(dtype=np.complex64, ndim=0),
    y : ndpointer(dtype=np.complex64),
    incy : c_int,
) -> c_void_p:
    pass
@wrap
def cblas_cdotc_sub(
    n : c_int,
    x : ndpointer(dtype=np.complex64),
    incx : c_int,
    y : ndpointer(dtype=np.complex64),
    incy : c_int,
    dotc : ndpointer(dtype=np.complex64, ndim=0),
) -> c_void_p:
    pass
@wrap
def cblas_scnrm2(
    n : c_int,
    x : ndpointer(dtype=np.complex64),
    incx : c_int,
) -> c_float:
    pass
@wrap
def cblas_cscal(
    n : c_int,
    a : ndpointer(dtype=np.complex64, ndim=0),
    x : ndpointer(dtype=np.complex64),
    incx : c_int,
) -> c_void_p:
    pass
# -----------------------------------------------------------------------
# FFT Routines
# -----------------------------------------------------------------------
class DFTI_DESCRIPTOR_HANDLE(c_void_p):
    # Opaque pointer type for MKL DFTI descriptors (distinct subclass so
    # ctypes keeps the type information instead of flattening to int).
    pass
class status_t(c_long):
    # Return type of the Dfti* routines; nonzero indicates an error code
    # that DftiErrorMessage can translate.
    pass
# read these values out of `gcc -E mkl.h | grep DFTI_WHATEVER`
# they could change so we're living on the edge here
DFTI_SINGLE = 35               # precision: single (complex64)
DFTI_COMPLEX = 32              # forward domain: complex-to-complex
DFTI_INPUT_DISTANCE = 14       # config key: stride between batched inputs
DFTI_OUTPUT_DISTANCE = 15      # config key: stride between batched outputs
DFTI_NUMBER_OF_TRANSFORMS = 7  # config key: batch size
DFTI_PLACEMENT = 11            # config key: in-place vs out-of-place
DFTI_INPLACE = 43              # placement value: overwrite the input
DFTI_NOT_INPLACE = 44          # placement value: separate output buffer
def _get_or_create_fft_desc(self, x):
    """Return a committed DFTI descriptor for arrays shaped and typed
    like ``x``, creating and caching one on first use.

    The last axis of ``x`` is treated as the batch dimension; the
    remaining axes (reversed) are the transform lengths.
    """
    key = (x.shape, x.dtype)
    try:
        return self._fft_descs[key]
    except KeyError:
        pass
    dims = x.shape[:-1][::-1]
    batch = x.shape[-1]
    rank = len(dims)
    # DftiCreateDescriptor takes a scalar length for 1-D transforms and
    # a C array of lengths otherwise.
    if rank == 1:
        lengths = c_long(dims[0])
    else:
        lengths = (c_long * rank)(*dims)
    desc = self.DFTI_DESCRIPTOR_HANDLE()
    self.DftiCreateDescriptor( byref(desc),
        self.DFTI_SINGLE, self.DFTI_COMPLEX, rank, lengths )
    self.DftiSetValue( desc, self.DFTI_NUMBER_OF_TRANSFORMS, batch )
    self.DftiSetValue( desc, self.DFTI_PLACEMENT, self.DFTI_NOT_INPLACE )
    # Consecutive batch elements are one full transform apart.
    self.DftiSetValue( desc, self.DFTI_INPUT_DISTANCE, np.prod(dims) )
    self.DftiSetValue( desc, self.DFTI_OUTPUT_DISTANCE, np.prod(dims) )
    self.DftiCommitDescriptor( desc )
    self._fft_descs[key] = desc
    return desc
def fftn(self, y, x):
    """Out-of-place forward FFT of ``x`` into ``y``."""
    self.DftiComputeForward(self._get_or_create_fft_desc(x), x, y)
def ifftn(self, y, x):
    """Out-of-place backward (inverse) FFT of ``x`` into ``y``."""
    self.DftiComputeBackward(self._get_or_create_fft_desc(x), x, y)
def __del__(self):
    """Release every cached DFTI descriptor.

    Uses ``getattr`` with a default because ``__del__`` can run on a
    partially constructed instance (or during interpreter teardown)
    where ``_fft_descs`` was never assigned; raising from ``__del__``
    would only produce an ignored-exception warning.
    """
    for desc in getattr(self, '_fft_descs', {}).values():
        self.DftiFreeDescriptor(byref(desc))
@wrap
def DftiErrorMessage(
    status : status_t,
) -> c_char_p:
    """Signature stub for MKL ``DftiErrorMessage`` (translate a status
    code to a human-readable string); bound by ``@wrap``."""
    pass
@wrap
def DftiComputeForward(
    desc_handle : DFTI_DESCRIPTOR_HANDLE,
    x_in : dndarray,
    y_out : dndarray,
) -> status_t:
    """Signature stub for MKL ``DftiComputeForward`` (forward FFT using
    a committed descriptor); bound by ``@wrap``."""
    pass
@wrap
def DftiComputeBackward(
    desc_handle : DFTI_DESCRIPTOR_HANDLE,
    x_in : dndarray,
    y_out : dndarray,
) -> status_t:
    """Signature stub for MKL ``DftiComputeBackward`` (inverse FFT using
    a committed descriptor); bound by ``@wrap``."""
    pass
@wrap
def DftiCommitDescriptor(
    desc_handle : DFTI_DESCRIPTOR_HANDLE,
) -> status_t:
    """Signature stub for MKL ``DftiCommitDescriptor`` (finalize a
    descriptor's configuration before compute calls); bound by ``@wrap``."""
    pass
@wrap
def DftiSetValue(
    desc_handle : DFTI_DESCRIPTOR_HANDLE,
    param : c_uint,
    value : c_uint,
) -> status_t:
    """Signature stub for MKL ``DftiSetValue`` (set one DFTI_* config
    parameter on an uncommitted descriptor); bound by ``@wrap``."""
    pass
@wrap
def DftiCreateDescriptor(
    desc_handle : DFTI_DESCRIPTOR_HANDLE,
    precision : c_uint,
    domain : c_uint,
    dimension : c_long,
    #length : varies. just pass a ctype instance here
) -> status_t:
    """Signature stub for MKL ``DftiCreateDescriptor``. The trailing
    length argument is intentionally unannotated: it is a scalar
    ``c_long`` for 1-D transforms and a ``c_long`` array otherwise, so
    callers pass a ready-made ctypes instance. Bound by ``@wrap``."""
    pass
@wrap
def DftiFreeDescriptor(
    desc_handle : DFTI_DESCRIPTOR_HANDLE,
) -> status_t:
    """Signature stub for MKL ``DftiFreeDescriptor`` (release descriptor
    resources; takes the handle by reference); bound by ``@wrap``."""
    pass
# -----------------------------------------------------------------------
# CSRMM Routines
# -----------------------------------------------------------------------
class csr_matrix(Backend.csr_matrix):
    # MKL's mkl_ccsrm{m,v} are called with one-based (Fortran-style)
    # column indices here (matrix descriptor 'F'), so the CSR arrays are
    # built with index base 1 instead of the usual 0.
    _index_base = 1
@wrap
def mkl_ccsrmm(
    transA : c_char*1,
    m : ndpointer(dtype=np.int32, ndim=0),
    n : ndpointer(dtype=np.int32, ndim=0),
    k : ndpointer(dtype=np.int32, ndim=0),
    alpha : ndpointer(dtype=np.dtype('complex64'), ndim=1),
    matdescA : c_char * 6,
    val : dndarray,
    indx : dndarray,
    pntrb : dndarray,
    pntre : dndarray,
    b : dndarray,
    ldb : ndpointer(dtype=np.int32, ndim=0),
    beta : ndpointer(dtype=np.dtype('complex64'), ndim=1),
    c : dndarray,
    ldc : ndpointer(dtype=np.int32, ndim=0),
) -> c_void_p :
    """Signature stub for MKL ``mkl_ccsrmm`` (complex64 CSR
    matrix-matrix multiply, Fortran calling convention: every scalar is
    passed by reference); bound by ``@wrap``."""
    pass
@wrap
def mkl_ccsrmv(
    transA : c_char*1,
    m : ndpointer(dtype=np.int32, ndim=0),
    k : ndpointer(dtype=np.int32, ndim=0),
    alpha : ndpointer(dtype=np.dtype('complex64'), ndim=1),
    matdescA : c_char * 6,
    val : dndarray,
    indx : dndarray,
    pntrb : dndarray,
    pntre : dndarray,
    b : dndarray,
    beta : ndpointer(dtype=np.dtype('complex64'), ndim=1),
    c : dndarray,
) -> c_void_p :
    """Signature stub for MKL ``mkl_ccsrmv`` (complex64 CSR
    matrix-vector multiply, scalars by reference); bound by ``@wrap``."""
    pass
def ccsrmm(self, y, A_shape, A_indx, A_ptr, A_vals, x, alpha, beta, adjoint=False, exwrite=False):
    """Compute ``y := alpha * op(A) @ x + beta * y`` for a CSR matrix A.

    op(A) is the conjugate transpose when ``adjoint`` is true, A itself
    otherwise. Dispatches to the matrix-vector kernel when ``x`` has a
    single column. ``exwrite`` is accepted for interface compatibility
    with other backends but is not used here.
    """
    # One-character op code, written the same way cdiamm does it:
    # 'C' = conjugate transpose, 'N' = no transpose.
    transA = create_string_buffer(b'C' if adjoint else b'N', size=1)
    ldx = np.array(x._leading_dim, dtype=np.int32)
    ldy = np.array(y._leading_dim, dtype=np.int32)
    # MKL's 3-array CSR variant wants separate row-begin / row-end arrays.
    A_ptrb = A_ptr[:-1]
    A_ptre = A_ptr[1:]
    m = np.array(A_shape[0], dtype=np.int32)
    n = np.array(x.shape[1], dtype=np.int32)
    k = np.array(A_shape[1], dtype=np.int32)
    alpha = np.array([alpha], dtype=np.dtype('complex64'))
    beta = np.array([beta], dtype=np.dtype('complex64'))
    # Matrix descriptor: 'G'eneral, 'N'on-unit diagonal, 'F'ortran
    # (one-based) indexing; remaining slots are NUL and unread by MKL.
    descrA = create_string_buffer(6)
    descrA[0] = b'G'
    descrA[2] = b'N'
    descrA[3] = b'F'
    if n == 1:
        # Single right-hand side: mat-vec avoids the mat-mat overhead.
        self.mkl_ccsrmv(transA, m, k, alpha,
                        descrA, A_vals, A_indx, A_ptrb, A_ptre,
                        x, beta, y)
    else:
        self.mkl_ccsrmm(transA, m, n, k, alpha,
                        descrA, A_vals, A_indx, A_ptrb, A_ptre,
                        x, ldx, beta, y, ldy)
# -----------------------------------------------------------------------
# DIAMM Routines
# -----------------------------------------------------------------------
@wrap
def mkl_cdiamm(
    transA : c_char*1,
    m : ndpointer(dtype=np.int32, ndim=0),
    n : ndpointer(dtype=np.int32, ndim=0),
    k : ndpointer(dtype=np.int32, ndim=0),
    alpha : ndpointer(dtype=np.dtype('complex64'), ndim=1),
    matdescA : c_char * 6,
    val : dndarray,
    lval : ndpointer(dtype=np.int32, ndim=0),
    idiag : dndarray,
    ndiag : ndpointer(dtype=np.int32, ndim=0),
    b : dndarray,
    ldb : ndpointer(dtype=np.int32, ndim=0),
    beta : ndpointer(dtype=np.dtype('complex64'), ndim=1),
    c : dndarray,
    ldc : ndpointer(dtype=np.int32, ndim=0),
) -> c_void_p :
    """Signature stub for MKL ``mkl_cdiamm`` (complex64 diagonal-format
    matrix-matrix multiply, scalars by reference); bound by ``@wrap``."""
    pass
class dia_matrix(Backend.dia_matrix):
    '''
    Diagonal storage format for MKL backends.
    MKL stores diagonals that span the vertical dimension of the matrix. This is opposite
    from cuda and numpy formats. Here, we convert numpy's horizontal-diagonal representation
    into a vertical one by conjugating the data entries and negating the offsets. The
    evaluation routines then compute forward indigo evaluations as adjoint MKL evaluations on
    this alternative representation.
    '''
    def __init__(self, backend, A, name='mat'):
        # Build the transposed-conjugate representation described above:
        # conj(data), negated offsets, swapped shape.
        A2 = spp.dia_matrix( (np.conj(A.data), -A.offsets ), shape=A.shape[::-1])
        super().__init__(backend, A2, name=name)
def cdiamm(self, y, shape, offsets, data, x, alpha=1.0, beta=0.0, adjoint=False):
    """Compute ``y := alpha * op(A) @ x + beta * y`` for a diagonal-storage
    matrix. Note the op codes are swapped relative to ccsrmm because
    dia_matrix stores the conjugate-transposed representation."""
    trans = create_string_buffer(b'N' if adjoint else b'C', size=1)
    # 'G'eneral matrix, 'N'on-unit diagonal, 'F'ortran indexing.
    descr = create_string_buffer(b'G_NF__', size=6)
    rows = np.array(shape[0], dtype=np.int32)
    cols = np.array(shape[1], dtype=np.int32)
    nrhs = np.array(x.shape[1], dtype=np.int32)
    scale_a = np.array([alpha], dtype=np.dtype('complex64'))
    scale_b = np.array([beta], dtype=np.dtype('complex64'))
    lval = np.array(data.shape[0], dtype=np.int32)
    ndiag = np.array(len(offsets._arr), dtype=np.int32)
    ldx = np.array(x._leading_dim, dtype=np.int32)
    ldy = np.array(y._leading_dim, dtype=np.int32)
    self.mkl_cdiamm(trans, rows, nrhs, cols, scale_a,
                    descr, data, lval, offsets, ndiag,
                    x, ldx, scale_b, y, ldy)
# -----------------------------------------------------------------------
# Misc Routines
# -----------------------------------------------------------------------
@staticmethod
def max(val, arr):
    """In-place elementwise max of ``arr`` against scalar ``val`` via the
    custom C extension. NOTE(review): the element count is arr.size*2 --
    presumably the complex64 array is processed as interleaved float32
    pairs; confirm against _customcpu's implementation."""
    from indigo.backends._customcpu import max as fastmax
    fastmax(arr.size*2, val, arr._arr)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MetaGraph and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os.path
import re
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Prefix to be added to unbound input names so they are easily identifiable.
_UNBOUND_INPUT_PREFIX = "$unbound_inputs_"
def _node_def(from_node_def, export_scope, unbound_inputs, clear_devices=False):
  """Create a `NodeDef` proto with export_scope stripped.

  Args:
    from_node_def: A `node_def_pb2.NodeDef` protocol buffer.
    export_scope: A `string` representing the name scope to remove.
    unbound_inputs: An array of unbound input names if they exist. Mutated
      in place: names of inputs from outside `export_scope` are appended.
    clear_devices: Boolean which controls whether to clear device information
      from node_def. Default false.

  Returns:
    A `node_def_pb2.NodeDef` protocol buffer.
  """
  node_def = copy.deepcopy(from_node_def)
  for i, v in enumerate(node_def.input):
    if (export_scope and
        not node_def.input[i].lstrip("^").startswith(export_scope)):
      # Adds "$unbound_inputs_" prefix to the unbound name so they are easily
      # identifiable.
      # The regex keeps an optional leading "^" (control dependency marker)
      # in front of the inserted prefix.
      node_def.input[i] = re.sub(r"([\^]|^)(.*)",
                                 r"\1" + _UNBOUND_INPUT_PREFIX + r"\2",
                                 compat.as_str(v))
      unbound_inputs.append(node_def.input[i])
    else:
      node_def.input[i] = ops.strip_name_scope(v, export_scope)
  node_def.name = compat.as_bytes(
      ops.strip_name_scope(from_node_def.name, export_scope))
  for k, v in six.iteritems(from_node_def.attr):
    if k == "_class":
      # Colocation attrs look like b"loc:@node_name"; drop entries whose
      # target node is outside the export scope and strip the scope from
      # the rest.
      new_s = [compat.as_bytes(
          ops.strip_name_scope(s, export_scope)) for s in v.list.s
               if not export_scope or
               compat.as_str(s).split("@")[1].startswith(export_scope)]
      node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(
          list=attr_value_pb2.AttrValue.ListValue(s=new_s)))
    else:
      node_def.attr[k].CopyFrom(v)
  if clear_devices:
    node_def.device = ""
  return node_def
def _read_file(filename):
  """Reads a file containing `GraphDef` and returns the protocol buffer.

  Args:
    filename: `graph_def` filename including the path.

  Returns:
    A `GraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  graph_def = graph_pb2.GraphDef()
  contents = file_io.read_file_to_string(filename)
  # Binary wire format first; if that fails, fall back to text proto.
  try:
    graph_def.ParseFromString(contents)
    return graph_def
  except Exception:  # pylint: disable=broad-except
    pass
  try:
    text_format.Merge(contents.decode("utf-8"), graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
  return graph_def
def ops_used_by_graph_def(graph_def):
  """Collect the list of ops used by a graph.

  Does not validate that the ops are all registered.

  Args:
    graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.

  Returns:
    A list of strings, each naming an op used by the graph.
  """
  # Map function names to their definitions.
  functions = {}
  for fn in graph_def.library.function:
    functions[fn.signature.name] = fn

  # Functions can reference other functions, so traverse recursively via an
  # explicit work list.
  used_ops = set()   # both primitive ops and functions
  pending = []       # function defs whose bodies still need visiting

  def visit(op_name):
    # Queue a function body the first time its name is encountered.
    if op_name not in used_ops and op_name in functions:
      pending.append(functions[op_name])
    used_ops.add(op_name)

  for node in graph_def.node:
    visit(node.op)
  while pending:
    fn = pending.pop()
    # TODO(josh11b): Eventually remove the legacy fn.node case.
    body = fn.node_def if fn.node_def else fn.node
    for node in body:
      visit(node.op)

  return [op for op in used_ops if op not in functions]
def stripped_op_list_for_graph(graph_def):
  """Collect the stripped OpDefs for ops used by a graph.

  This function computes the `stripped_op_list` field of `MetaGraphDef` and
  similar protos. The result can be communicated from the producer to the
  consumer, which can then use the C++ function
  `RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.

  Args:
    graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.

  Returns:
    An `OpList` of ops used by the graph.

  Raises:
    ValueError: If an unregistered op is used.
  """
  # This is the Python equivalent of StrippedOpListForGraph in C++.
  # Unfortunately, since the Python op registry can differ from that in C++, we
  # can't remove the duplication using swig (at least naively).
  # TODO(irving): Support taking graphs directly.
  used_ops = ops_used_by_graph_def(graph_def)
  registered_ops = op_def_registry.get_registered_ops()
  # These internal ops used by functions are not registered, so we need to
  # whitelist them. # TODO(irving): Do something better here.
  op_whitelist = ("_Arg", "_Retval", "_ListToArray", "_ArrayToList")
  unknown = [op for op in used_ops
             if op not in registered_ops and op not in op_whitelist]
  if unknown:
    raise ValueError("Op %s is used by the graph, but is not registered"
                     % unknown[0])
  # Build the stripped op list in sorted order.
  return op_def_pb2.OpList(op=[registered_ops[op] for op in sorted(used_ops)
                               if op in registered_ops])
def _get_kind_name(item):
  """Returns the kind name in CollectionDef.

  Args:
    item: A data item.

  Returns:
    The string representation of the kind in CollectionDef.
  """
  # Checked in order; anything unrecognized falls through to "node_list".
  dispatch = (
      ((six.string_types, six.binary_type), "bytes_list"),
      (six.integer_types, "int64_list"),
      (float, "float_list"),
      (Any, "any_list"),
  )
  for types_, kind in dispatch:
    if isinstance(item, types_):
      return kind
  return "node_list"
def _should_include_node(node_or_node_name, export_scope):
  """Returns `True` if a node should be included.

  Args:
    node_or_node_name: A node or `string` node name.
    export_scope: `string`. Name scope under which to extract the subgraph. The
      scope name will be stripped from the node definitions for easy import
      later into new name scopes.

  Returns:
    `True` if the node should be included.
  """
  if isinstance(node_or_node_name, six.string_types):
    node_name = node_or_node_name
  else:
    try:
      node_name = node_or_node_name.name
    except AttributeError:
      # Keep the object that we don't know how to process.
      return True
  if node_name.startswith(_UNBOUND_INPUT_PREFIX):
    return True
  return not export_scope or node_name.startswith(export_scope)
def add_collection_def(meta_graph_def, key, graph=None,
                       export_scope=None):
  """Adds a collection to MetaGraphDef protocol buffer.

  Mutates `meta_graph_def` in place. Serialization is best-effort: a
  collection whose items cannot be serialized is logged and dropped
  rather than aborting the whole export.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer.
    key: One of the GraphKeys or user-defined string.
    graph: The `Graph` from which to get collections.
    export_scope: Optional `string`. Name scope to remove.
  """
  if graph and not isinstance(graph, ops.Graph):
    raise TypeError("graph must be of type Graph, not %s", type(graph))
  if not isinstance(key, six.string_types) and not isinstance(key, bytes):
    logging.warning("Only collections with string type keys will be "
                    "serialized. This key has %s", type(key))
    return
  # Sets graph to default graph if it's not passed in.
  graph = graph or ops.get_default_graph()
  collection_list = graph.get_collection(key)
  # Remove nodes that should not be exported from the collection list.
  collection_list = [x for x in collection_list if
                     _should_include_node(x, export_scope)]
  if not collection_list:
    return
  try:
    col_def = meta_graph_def.collection_def[key]
    to_proto = ops.get_to_proto_function(key)
    proto_type = ops.get_collection_proto_type(key)
    if to_proto:
      # Collection type has a registered proto converter: store the
      # serialized protos as bytes.
      kind = "bytes_list"
      for x in collection_list:
        # Additional type check to make sure the returned proto is indeed
        # what we expect.
        proto = to_proto(x, export_scope=export_scope)
        if proto:
          assert isinstance(proto, proto_type)
          getattr(col_def, kind).value.append(proto.SerializeToString())
    else:
      # No converter: infer the CollectionDef field from the first item.
      kind = _get_kind_name(collection_list[0])
      if kind == "node_list":
        for x in collection_list:
          if not export_scope or x.name.startswith(export_scope):
            getattr(col_def, kind).value.append(
                ops.strip_name_scope(x.name, export_scope))
      elif kind == "bytes_list":
        # NOTE(opensource): This force conversion is to work around the fact
        # that Python3 distinguishes between bytes and strings.
        getattr(col_def, kind).value.extend(
            [compat.as_bytes(x) for x in collection_list])
      else:
        getattr(col_def, kind).value.extend([x for x in collection_list])
  except Exception as e:  # pylint: disable=broad-except
    # Deliberately broad: an unserializable collection must not abort the
    # export; log it and remove the partially-filled entry.
    logging.warning("Error encountered when serializing %s.\n"
                    "Type is unsupported, or the types of the items don't "
                    "match field type in CollectionDef.\n%s", key, str(e))
    if key in meta_graph_def.collection_def:
      del meta_graph_def.collection_def[key]
    return
def create_meta_graph_def(meta_info_def=None,
                          graph_def=None,
                          saver_def=None,
                          collection_list=None,
                          graph=None,
                          export_scope=None):
  """Construct and returns a `MetaGraphDef` protocol buffer.

  Args:
    meta_info_def: `MetaInfoDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer. If `None`, the default graph's
      `GraphDef` (with shapes) is used.
    saver_def: `SaverDef` protocol buffer.
    collection_list: List of string keys to collect. If `None`, all
      collection keys of `graph` are exported.
    graph: The `Graph` to create `MetaGraphDef` out of.
    export_scope: Optional `string`. Name scope to remove.

  Returns:
    MetaGraphDef protocol buffer.

  Raises:
    TypeError: If the arguments are not of the correct proto buffer type.
  """
  # Type check.
  if graph and not isinstance(graph, ops.Graph):
    raise TypeError("graph must be of type Graph, not %s", type(graph))
  if meta_info_def and not isinstance(meta_info_def,
                                      meta_graph_pb2.MetaGraphDef.MetaInfoDef):
    raise TypeError("meta_info_def must be of type MetaInfoDef, not %s",
                    type(meta_info_def))
  if graph_def and not isinstance(graph_def, graph_pb2.GraphDef):
    raise TypeError("graph_def must be of type GraphDef, not %s",
                    type(graph_def))
  if saver_def and not isinstance(saver_def, saver_pb2.SaverDef):
    raise TypeError("saver_def must be of type SaverDef, not %s",
                    type(saver_def))
  # Sets graph to default graph if it's not passed in.
  graph = graph or ops.get_default_graph()
  # Creates a MetaGraphDef proto.
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  # Adds meta_info_def.
  if not meta_info_def:
    meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
  # Set the tf version strings to the current tf build.
  meta_info_def.tensorflow_version = versions.__version__
  meta_info_def.tensorflow_git_version = versions.__git_version__
  meta_graph_def.meta_info_def.MergeFrom(meta_info_def)
  # Adds graph_def or the default.
  if not graph_def:
    meta_graph_def.graph_def.MergeFrom(graph.as_graph_def(add_shapes=True))
  else:
    meta_graph_def.graph_def.MergeFrom(graph_def)
  # Fills in meta_info_def.stripped_op_list using the ops from graph_def.
  # Only computed when the caller didn't already provide one.
  # pylint: disable=g-explicit-length-test
  if len(meta_graph_def.meta_info_def.stripped_op_list.op) == 0:
    meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
        stripped_op_list_for_graph(meta_graph_def.graph_def))
  # pylint: enable=g-explicit-length-test
  # Adds saver_def.
  if saver_def:
    meta_graph_def.saver_def.MergeFrom(saver_def)
  # Adds collection_list.
  if collection_list:
    clist = collection_list
  else:
    clist = graph.get_all_collection_keys()
  for ctype in clist:
    add_collection_def(meta_graph_def, ctype,
                       graph=graph,
                       export_scope=export_scope)
  return meta_graph_def
def read_meta_graph_file(filename):
  """Reads a file containing `MetaGraphDef` and returns the protocol buffer.

  Args:
    filename: `meta_graph_def` filename including the path.

  Returns:
    A `MetaGraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  contents = file_io.read_file_to_string(filename)
  # Binary wire format first; if that fails, fall back to text proto.
  try:
    meta_graph_def.ParseFromString(contents)
    return meta_graph_def
  except Exception:  # pylint: disable=broad-except
    pass
  try:
    text_format.Merge(contents.decode("utf-8"), meta_graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
  return meta_graph_def
def import_scoped_meta_graph(meta_graph_or_file,
                             clear_devices=False,
                             graph=None,
                             import_scope=None,
                             input_map=None,
                             unbound_inputs_col_name="unbound_inputs"):
  """Recreates a `Graph` saved in a `MetaGraphDef` proto.

  This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
  it constructs a protocol buffer from the file content. The function
  then adds all the nodes from the `graph_def` field to the
  current graph, recreates all the collections, and returns a saver
  constructed from the `saver_def` field.

  In combination with `export_scoped_meta_graph()`, this function can be used to

  * Serialize a graph along with other Python objects such as `QueueRunner`,
    `Variable` into a `MetaGraphDef`.

  * Restart training from a saved graph and checkpoints.

  * Run inference from a saved graph and checkpoints.

  Args:
    meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
      the path) containing a `MetaGraphDef`.
    clear_devices: Boolean which controls whether to clear device information
      from graph_def. Default false.
    graph: The `Graph` to import into. If `None`, use the default graph.
    import_scope: Optional `string`. Name scope into which to import the
      subgraph. If `None`, the graph is imported to the root name scope.
    input_map: A dictionary mapping input names (as strings) in `graph_def` to
      `Tensor` objects. The values of the named input tensors in the imported
      graph will be re-mapped to the respective `Tensor` values.
    unbound_inputs_col_name: Collection name for looking up unbound inputs.

  Returns:
    A dictionary of all the `Variables` imported into the name scope.

  Raises:
    ValueError: If the graph_def contains unbound inputs.
  """
  if isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
    meta_graph_def = meta_graph_or_file
  else:
    meta_graph_def = read_meta_graph_file(meta_graph_or_file)

  if unbound_inputs_col_name:
    # Every unbound input recorded at export time must be remapped through
    # input_map, otherwise the imported graph would have dangling inputs.
    for key, col_def in meta_graph_def.collection_def.items():
      if key == unbound_inputs_col_name:
        kind = col_def.WhichOneof("kind")
        field = getattr(col_def, kind)
        if field.value and (
            not input_map or
            sorted([compat.as_str(v) for v in field.value]) !=
            sorted(input_map)):
          raise ValueError("Graph contains unbound inputs: %s. Must "
                           "provide these inputs through input_map." %
                           ",".join([compat.as_str(v) for v in field.value]))
        break

  # Sets graph to default graph if it's not passed in.
  graph = graph or ops.get_default_graph()

  # Gathers the list of nodes we are interested in.
  with graph.as_default():
    producer_op_list = None
    if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
      producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
    input_graph_def = meta_graph_def.graph_def
    # Remove all the explicit device specifications for this node. This helps to
    # make the graph more portable.
    if clear_devices:
      for node in input_graph_def.node:
        node.device = ""
    importer.import_graph_def(
        input_graph_def, name=(import_scope or ""), input_map=input_map,
        producer_op_list=producer_op_list)

    # Restores all the other collections.
    for key, col_def in meta_graph_def.collection_def.items():
      # Don't add unbound_inputs to the new graph.
      if key == unbound_inputs_col_name:
        continue

      kind = col_def.WhichOneof("kind")
      if kind is None:
        logging.error("Cannot identify data type for collection %s. Skipping.",
                      key)
        continue
      from_proto = ops.get_from_proto_function(key)
      if from_proto:
        # Registered proto collections were serialized as bytes; rebuild
        # each item via its from_proto function.
        assert kind == "bytes_list"
        proto_type = ops.get_collection_proto_type(key)
        for value in col_def.bytes_list.value:
          proto = proto_type()
          proto.ParseFromString(value)
          graph.add_to_collection(
              key, from_proto(proto, import_scope=import_scope))
      else:
        field = getattr(col_def, kind)
        if kind == "node_list":
          for value in field.value:
            col_op = graph.as_graph_element(
                ops.prepend_name_scope(value, import_scope))
            graph.add_to_collection(key, col_op)
        elif kind == "int64_list":
          # NOTE(opensource): This force conversion is to work around the fact
          # that Python2 distinguishes between int and long, while Python3 has
          # only int.
          for value in field.value:
            graph.add_to_collection(key, int(value))
        else:
          for value in field.value:
            graph.add_to_collection(
                key, ops.prepend_name_scope(value, import_scope))

    var_list = {}
    variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                     scope=import_scope)
    for v in variables:
      var_list[ops.strip_name_scope(v.name, import_scope)] = v

  return var_list
def export_scoped_meta_graph(filename=None,
                             graph_def=None,
                             graph=None,
                             export_scope=None,
                             as_text=False,
                             unbound_inputs_col_name="unbound_inputs",
                             clear_devices=False,
                             **kwargs):
  """Returns `MetaGraphDef` proto. Optionally writes it to filename.

  This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
  at a later time or location to restart training, run inference, or be
  a subgraph.

  Args:
    filename: Optional filename including the path for writing the
      generated `MetaGraphDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    graph: The `Graph` to import into. If `None`, use the default graph.
    export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
      for easy import later into new name scopes. If `None`, the whole graph
      is exported. graph_def and export_scope cannot both be specified.
    as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
    unbound_inputs_col_name: Optional `string`. If provided, a string collection
      with the given name will be added to the returned `MetaGraphDef`,
      containing the names of tensors that must be remapped when importing the
      `MetaGraphDef`.
    clear_devices: Boolean which controls whether to clear device information
      before exporting the graph.
    **kwargs: Optional keyed arguments, including meta_info_def,
      saver_def, collection_list.

  Returns:
    A `MetaGraphDef` proto and dictionary of `Variables` in the exported
    name scope.

  Raises:
    ValueError: When the `GraphDef` is larger than 2GB.
  """
  graph = graph or ops.get_default_graph()
  unbound_inputs = []
  if export_scope or clear_devices:
    if graph_def:
      # Rebuild the provided GraphDef keeping only in-scope nodes, with
      # scope names stripped and devices optionally cleared.
      new_graph_def = graph_pb2.GraphDef()
      new_graph_def.versions.CopyFrom(graph_def.versions)
      for node_def in graph_def.node:
        if _should_include_node(node_def.name, export_scope):
          new_node_def = _node_def(node_def, export_scope, unbound_inputs,
                                   clear_devices=clear_devices)
          new_graph_def.node.extend([new_node_def])
      graph_def = new_graph_def
    else:
      # Only do this complicated work if we want to remove a name scope.
      graph_def = graph_pb2.GraphDef()
      # pylint: disable=protected-access
      graph_def.versions.CopyFrom(graph.graph_def_versions)
      bytesize = 0
      # Iterate ops in creation order (ids are monotonically increasing).
      for key in sorted(graph._nodes_by_id):
        if _should_include_node(graph._nodes_by_id[key].name, export_scope):
          value = graph._nodes_by_id[key]
          # pylint: enable=protected-access
          node_def = _node_def(value.node_def, export_scope, unbound_inputs,
                               clear_devices=clear_devices)
          graph_def.node.extend([node_def])
          if value.outputs:
            assert "_output_shapes" not in graph_def.node[-1].attr
            graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
                output.get_shape().as_proto() for output in value.outputs])
          bytesize += value.node_def.ByteSize()
          # Protos cannot exceed 2GB; also catch a wrapped-around counter.
          if bytesize >= (1 << 31) or bytesize < 0:
            raise ValueError("GraphDef cannot be larger than 2GB.")
    # It's possible that not all the inputs are in the export_scope.
    # If we would like such information included in the exported meta_graph,
    # add them to a special unbound_inputs collection.
    if unbound_inputs_col_name:
      # Clears the unbound_inputs collections.
      graph.clear_collection(unbound_inputs_col_name)
      for k in unbound_inputs:
        graph.add_to_collection(unbound_inputs_col_name, k)

  var_list = {}
  variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                   scope=export_scope)
  for v in variables:
    if _should_include_node(v, export_scope):
      var_list[ops.strip_name_scope(v.name, export_scope)] = v

  scoped_meta_graph_def = create_meta_graph_def(
      graph_def=graph_def,
      graph=graph,
      export_scope=export_scope,
      **kwargs)

  if filename:
    graph_io.write_graph(
        scoped_meta_graph_def,
        os.path.dirname(filename),
        os.path.basename(filename),
        as_text=as_text)

  return scoped_meta_graph_def, var_list
def copy_scoped_meta_graph(from_scope, to_scope,
                           from_graph=None, to_graph=None):
  """Copies a sub-meta_graph from one scope to another.

  Args:
    from_scope: `String` name scope containing the subgraph to be copied.
    to_scope: `String` name scope under which the copied subgraph will reside.
    from_graph: Optional `Graph` from which to copy the subgraph. If `None`, the
      default graph is use.
    to_graph: Optional `Graph` to which to copy the subgraph. If `None`, the
      default graph is used.

  Returns:
    A dictionary of `Variables` that has been copied into `to_scope`.

  Raises:
    ValueError: If `from_scope` and `to_scope` are the same while
      `from_graph` and `to_graph` are also the same.
  """
  source_graph = from_graph or ops.get_default_graph()
  target_graph = to_graph or ops.get_default_graph()

  if source_graph == target_graph and from_scope == to_scope:
    raise ValueError("'from_scope' and 'to_scope' need to be different "
                     "when performing copy in the same graph.")

  # Export the source scope, then re-import it under the target scope;
  # import's variable map is the one reflecting the new names.
  scoped_meta_graph, _ = export_scoped_meta_graph(
      export_scope=from_scope, graph=source_graph)
  return import_scoped_meta_graph(scoped_meta_graph,
                                  graph=target_graph,
                                  import_scope=to_scope)
| |
__author__ = "Manuel Escriche <mev@tid.es>"
import statistics
from datetime import date, datetime
from itertools import accumulate
from collections import Counter
from . import calendar as FWcalendar
from kconfig import tComponentsBook
from kconfig import tHelpDeskNodesBook
class ChannelDeskReporter:
def __init__(self, issuesList, title, channel):
    # issuesList: iterable of issue objects; the properties of this class
    # read .age, .resolved and .created from each item -- TODO confirm schema.
    # channel: key into tComponentsBook (see `leader`).
    self.title = title
    self.issues = issuesList
    self.channel = channel
@property
def leader(self):
    # Leader of the component this channel maps to, looked up in the
    # global components book.
    return tComponentsBook[self.channel].leader
@property
def resolutionTime_graph_data(self):
count = Counter([issue.age for issue in self.issues])
pending_count = Counter([issue.age for issue in self.issues if not issue.resolved ])
_min,_max = min(count.keys()), max(count.keys())
data = [count[k] for k in range(_min,_max+1)]
pending_data = [pending_count[k] for k in range(_min,_max+1)]
recent_count = Counter([issue.age for issue in self.issues if (date.today() - issue.created).days <= 60 ])
recent_data = [recent_count[k] for k in range(_min,_max+1)]
outdata = {}
outdata['categories'] = [k for k in range(_min,_max+1)]
outdata['time'] = dict()
outdata['time']['type'] = 'column'
outdata['time']['name'] = 'Mature issues'
outdata['time']['data'] = data
outdata['age'] = dict()
outdata['age']['type'] = 'column'
outdata['age']['name'] = "Pending issues"
outdata['age']['color'] = '#ff4040'
outdata['age']['data'] = pending_data
outdata['recent'] = dict()
outdata['recent']['type'] = 'column'
outdata['recent']['name'] = "Recent issues"
outdata['recent']['color'] = 'green'
outdata['recent']['data'] = recent_data
return outdata
@property
def stats(self):
#count = Counter([issue.age for issue in self.issues])
data = [issue.age for issue in self.issues]
outdata = {}
outdata['n'] = len(data)
try:
outdata['min'] = min(data)
except: pass
try:
outdata['max'] = max(data)
except: pass
try:
outdata['mean'] = statistics.mean(data)
except: pass
try:
outdata['median'] = statistics.median(data)
except: pass
try:
outdata['mode'] = statistics.mode(data)
except: pass
try:
outdata['stdev'] = statistics.stdev(data)
except: pass
try:
outdata['variance'] = statistics.variance(data)
except: pass
return outdata
@property
def statsOfPending(self):
#count = Counter([issue.age for issue in self.issues])
data = [issue.age for issue in self.issues if not issue.resolved]
outdata = {}
outdata['n'] = len(data)
try:
outdata['min'] = min(data)
except: pass
try:
outdata['max'] = max(data)
except: pass
try:
outdata['mean'] = statistics.mean(data)
except: pass
try:
outdata['median'] = statistics.median(data)
except: pass
try:
outdata['mode'] = statistics.mode(data)
except: pass
try:
outdata['stdev'] = statistics.stdev(data)
except: pass
try:
outdata['variance'] = statistics.variance(data)
except: pass
return outdata
@property
def statsOfRecent(self):
#count = Counter([issue.age for issue in self.issues])
data = [issue.age for issue in self.issues if (date.today() - issue.created).days <= 60]
outdata = {}
outdata['n'] = len(data)
try:
outdata['min'] = min(data)
except: pass
try:
outdata['max'] = max(data)
except: pass
try:
outdata['mean'] = statistics.mean(data)
except: pass
try:
outdata['median'] = statistics.median(data)
except: pass
try:
outdata['mode'] = statistics.mode(data)
except: pass
try:
outdata['stdev'] = statistics.stdev(data)
except: pass
try:
outdata['variance'] = statistics.variance(data)
except: pass
return outdata
@property
def achievement_graph_data(self):
color = {'Resolved':'#bada55', 'Pending':'#ff4040'}
value = { True: 'Resolved', False:'Pending'}
count = Counter([value[issue.resolved != None] for issue in self.issues])
return [{'name':_type, 'y': count[_type], 'color': color[_type]} for _type in count]
@property
def evolution_graph_data(self):
book = FWcalendar.monthBook
createdIssues = Counter(['{:02d}-{}'.format(issue.created.month, issue.created.year) for issue in self.issues])
createdData = list(accumulate([createdIssues[book[month]] for month in FWcalendar.pastMonths]))
issues = [issue for issue in self.issues if issue.resolved]
resolvedIssues = Counter(['{:02d}-{}'.format(issue.resolved.month, issue.resolved.year) for issue in issues])
progressData = [resolvedIssues[book[month]] for month in FWcalendar.pastMonths]
resolvedData = list(accumulate(progressData))
outdata = {}
outdata['categories'] = FWcalendar.timeline
outdata['created'] = dict()
outdata['created']['type'] = 'spline'
outdata['created']['name'] = 'Created'
outdata['created']['data'] = createdData
outdata['resolved'] = dict()
outdata['resolved']['type'] = 'spline'
outdata['resolved']['name'] = 'Resolved'
outdata['resolved']['data'] = resolvedData
outdata['progress'] = dict()
outdata['progress']['type'] = 'column'
outdata['progress']['name'] = 'Progress'
outdata['progress']['data'] = progressData
outdata['summary'] = dict()
outdata['summary']['type'] = 'pie'
outdata['summary']['name'] = 'Status'
outdata['summary']['data'] = [{'name': 'Resolved', 'y': resolvedData[-1], 'color': 'Highcharts.getOptions().colors[1]' },
{'name': 'Pending', 'y': createdData[-1] - resolvedData[-1], 'color': '#ff4040'}]
outdata['summary']['center'] = [70, 60]
outdata['summary']['size'] = 80
outdata['summary']['dataLabels'] = {'enabled': 'true',
'format': '<b>{point.name}</b>: <br/> {point.y} ({point.percentage:.1f}%)'}
return outdata
class TrackerDeskReporter:
    """Builds chart-ready datasets and statistics for a help-desk tracker.

    Created with the full issue list for a tracker; the working set
    (``self.issues``) can then be narrowed to one component (``setIssues``)
    or one help-desk node (``setNodeIssues``). All graph/stat properties
    operate on the current working set.
    """

    def __init__(self, issuesList, tracker, title):
        self.title = title
        self.issuesList = issuesList
        self.issues = issuesList  # current working subset (starts as everything)
        # Components that belong to this tracker, per the global components book.
        self.components = [comp for comp in tComponentsBook if tComponentsBook[comp].tracker == tracker]
        self.nodes = iter(tHelpDeskNodesBook)
        self.compNames = {cmp: tComponentsBook[cmp].name for cmp in self.components}
        # Pre-bucket issues by component and by node so the set*Issues()
        # switches below are O(1) dictionary lookups.
        self._issuesByCompDict = {cmp: [issue for issue in issuesList if tComponentsBook[cmp].key in issue.components]
                                  for cmp in self.components}
        self._issuesByNodeDict = {node: [issue for issue in issuesList if issue.assignee in tHelpDeskNodesBook[node].workers]
                                  for node in tHelpDeskNodesBook}
        self.__cmp = None
        self.__node = None

    def setIssues(self, cmp):
        """Narrow the working set to the issues of one component."""
        self.__cmp = cmp
        self.issues = self._issuesByCompDict[cmp]

    def setNodeIssues(self, node):
        """Narrow the working set to issues assigned to one node's workers."""
        self.__node = node
        self.issues = self._issuesByNodeDict[node]

    @property
    def cmpLeader(self):
        """Leader of the component selected by the last setIssues() call."""
        return tComponentsBook[self.__cmp].leader

    @staticmethod
    def _age_stats(data):
        """Summary-statistics dict for a list of issue ages.

        Always contains 'n'; min/max appear for non-empty data, and each
        statistics.* figure appears only when it is defined for the sample
        (e.g. stdev/variance need at least two points). Shared by stats,
        statsOfPending and statsOfRecent, which previously triplicated it.
        """
        outdata = {'n': len(data)}
        if data:  # min()/max() raise ValueError on an empty sequence
            outdata['min'] = min(data)
            outdata['max'] = max(data)
        # Catch only StatisticsError (figure undefined for this sample)
        # instead of a bare except that would mask unrelated bugs.
        for key, fn in (('mean', statistics.mean),
                        ('median', statistics.median),
                        ('mode', statistics.mode),
                        ('stdev', statistics.stdev),
                        ('variance', statistics.variance)):
            try:
                outdata[key] = fn(data)
            except statistics.StatisticsError:
                pass
        return outdata

    @property
    def resolutionTime_graph_data(self):
        """Histogram column series of issue ages: all, pending, and recent.

        Raises ValueError when the working set is empty (min/max of an
        empty histogram), matching the previous behavior.
        """
        count = Counter(issue.age for issue in self.issues)
        pending_count = Counter(issue.age for issue in self.issues if not issue.resolved)
        _min, _max = min(count.keys()), max(count.keys())
        span = range(_min, _max + 1)
        data = [count[k] for k in span]
        pending_data = [pending_count[k] for k in span]
        # "Recent" = resolved issues created within the last 60 days.
        recent_count = Counter(issue.age for issue in self.issues
                               if issue.resolved and (date.today() - issue.created).days <= 60)
        recent_data = [recent_count[k] for k in span]
        outdata = {'categories': list(span)}
        outdata['time'] = {'type': 'column', 'name': 'Mature issues', 'data': data}
        outdata['age'] = {'type': 'column', 'name': 'Pending issues',
                          'color': '#ff4040', 'data': pending_data}
        outdata['recent'] = {'type': 'column', 'name': 'Recent issues',
                             'color': 'green', 'data': recent_data}
        return outdata

    @property
    def stats(self):
        """Age statistics over every issue in the working set."""
        return self._age_stats([issue.age for issue in self.issues])

    @property
    def statsOfPending(self):
        """Age statistics over unresolved issues only."""
        return self._age_stats([issue.age for issue in self.issues if not issue.resolved])

    @property
    def statsOfRecent(self):
        """Age statistics over issues created within the last 60 days."""
        return self._age_stats([issue.age for issue in self.issues
                                if (date.today() - issue.created).days <= 60])

    @property
    def achievement_graph_data(self):
        """Pie-chart slices counting resolved vs. pending issues."""
        color = {'Resolved': '#bada55', 'Pending': '#ff4040'}
        value = {True: 'Resolved', False: 'Pending'}
        # Identity test ('is not None') instead of '!= None'.
        count = Counter(value[issue.resolved is not None] for issue in self.issues)
        return [{'name': _type, 'y': count[_type], 'color': color[_type]} for _type in count]

    @property
    def evolution_graph_data(self):
        """Created/resolved evolution series plus a status summary pie.

        Cumulative created and resolved counts (splines), monthly resolved
        counts (columns) and a resolved/pending pie, over the month window
        configured in FWcalendar.
        """
        book = FWcalendar.monthBook
        createdIssues = Counter('{:02d}-{}'.format(issue.created.month, issue.created.year)
                                for issue in self.issues)
        createdData = list(accumulate(createdIssues[book[month]] for month in FWcalendar.pastMonths))
        closed = [issue for issue in self.issues if issue.resolved]
        resolvedIssues = Counter('{:02d}-{}'.format(issue.resolved.month, issue.resolved.year)
                                 for issue in closed)
        progressData = [resolvedIssues[book[month]] for month in FWcalendar.pastMonths]
        resolvedData = list(accumulate(progressData))
        outdata = {'categories': FWcalendar.timeline}
        outdata['created'] = {'type': 'spline', 'name': 'Created', 'data': createdData}
        outdata['resolved'] = {'type': 'spline', 'name': 'Resolved', 'data': resolvedData}
        outdata['progress'] = {'type': 'column', 'name': 'Progress', 'data': progressData}
        outdata['summary'] = {
            'type': 'pie',
            'name': 'Status',
            'data': [{'name': 'Resolved', 'y': resolvedData[-1],
                      'color': 'Highcharts.getOptions().colors[1]'},
                     {'name': 'Pending', 'y': createdData[-1] - resolvedData[-1],
                      'color': '#ff4040'}],
            'center': [70, 60],
            'size': 80,
            'dataLabels': {'enabled': 'true',
                           'format': '<b>{point.name}</b>: <br/> {point.y} ({point.percentage:.1f}%)'},
        }
        return outdata
# Library module: importing it is the only supported use; running it
# directly does nothing.
if __name__ == "__main__":
    pass
| |
import pygame
import sys
import math
import random
# Print usage instructions to the console at startup.
print("\n\n# # # # # INSTRUCTIONS FOR USE # # # # #")
print("This was made to facilitate development of my\n"
      "Asteroids! clone's programmatically-defined\n"
      "art assets.")
print("You can use it by left clicking once inside\n"
      "the green square on the screen that pops up\n"
      "to start drawing a line, then left clicking\n"
      "again somewhere else to finish drawing it.")
print("The green square is the extent of the object's\n"
      "hitbox in-game, and you can actually draw\n"
      "outside it, too.")
print("\nWhen saved, the design will be written\n"
      "to a text file in the same directory as\n"
      "this program with a name indicative of\n"
      "the number of lines used.")
print("In order to incorporate your design into the\n"
      "Asteroids! game, simply copy and paste the\n"
      "contents of the file over the similarly-formatted\n"
      "data inside the game's draw_programmatic_object()\n"
      "function, or comment out the appropriate line\n"
      "and simply paste this beneath it.")
print("\nKEY COMMANDS:")
print(" -- s saves your design and quits the program")
print(" -- r cancels the current line")
print(" -- g will remove a line you've just drawn")
print(" -- q has the same function as left clicking")
print(" -- esc quits without saving")
# # # # Goal statement # # # #
# When the user clicks, a point is added to points_array.
# If the user has clicked, a line is drawn from the first point in
# points_array to the cursor.
# If len(points_array) > 1, lines are drawn between the most recently
# added point and the next most recently added point.
# ((edit: start and finish each line separately now))
# ...
# When the user hits the Save key, points_array will be exported to
# a file for use in other programs as a programmatically drawn object.
# # # # Notes for future improvement # # # #
# I think this program might be using the wrong kind of event/keypress
# monitoring. See http://www.pygame.org/docs/tut/newbieguide.html
# for details, specifically the event subsystem section.
# Update: This problem has something to do with why I put in
# user_recently_clicked and tied it to the game clock via a ticker variable.
# As a result of that there's a touch of unresponsiveness if you're
# drawing very quickly. This is to prevent unwanted oversensitivity.
# The way the program is handling clicks makes it too likely to interpret
# what the user thought was a single click as multiple clicks in succession.
# The solution was to put duct tape over it and be overjoyed that the
# result actually worked.
# I am told this constitutes valuable work experience.
# # # # Constants # # # #
# Window dimensions in pixels.
SCREEN_SIZE = SCREEN_WIDTH, SCREEN_HEIGHT = 300, 300
# RGB color triples.
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
GREEN = [0, 255, 0]
# Center of the window; the hitbox square and saved coordinates are
# measured relative to this point.
SCREEN_CENTER_X, SCREEN_CENTER_Y = (SCREEN_WIDTH / 2), (SCREEN_HEIGHT / 2)
# # # # Functions # # # #
def add_previous_point_and_current_point_to_lines_array_as_a_line():
    '''
    Commit the in-progress line: append [start, end] to lines_array,
    where start is the previously placed point and end is the cursor's
    current position, then leave line-adding mode.
    '''
    # Todo: Parameterize state
    global user_is_currently_adding_a_line
    start = [previous_point[0], previous_point[1]]
    end = [cursor_position[0], cursor_position[1]]
    lines_array.append([start, end])
    user_is_currently_adding_a_line = False
def add_point_to_points_array():
    '''
    Places the x, y values of the cursor's current position into points_array.
    '''
    # points_array is seeded with a [0, 0] placeholder; the first real
    # click overwrites it in place, later clicks append new points.
    # Known limitation: a genuine click at [0, 0] is indistinguishable
    # from the placeholder (a None sentinel would fix this someday).
    first_slot = points_array[0]
    if first_slot == [0, 0]:
        first_slot[0] = cursor_position[0]
        first_slot[1] = cursor_position[1]
    else:
        points_array.append([cursor_position[0], cursor_position[1]])
def write_something_to_a_text_file(filename, string_to_write):
    '''
    Write a supplied string to a text file with the supplied name.
    Generate such a file if none exists.
    '''
    # Context manager guarantees the handle is closed even if the write
    # raises; the previous open/write/close sequence leaked on error.
    with open(filename, "w") as text_file:
        text_file.write(string_to_write)
def render_all():
    '''
    Draw all lines in points_array on the screen.
    Also draw the tentative next line connecting the last placed point
    to the cursor, if the user is currently drawing, and draws UI elements.
    '''
    screen.fill(BLACK)
    # Legacy point-chain rendering (points_array never grows in the
    # current point-pair workflow, so this branch is normally idle).
    if len(points_array) > 1:
        for chain_prev, chain_next in zip(points_array, points_array[1:]):
            pygame.draw.line(screen, WHITE,
                             [chain_prev[0], chain_prev[1]],
                             [chain_next[0], chain_next[1]], 1)
    # Completed point-pair lines.
    if len(lines_array) >= 1:
        for line_start, line_end in lines_array:
            pygame.draw.line(screen, WHITE,
                             [line_start[0], line_start[1]],
                             [line_end[0], line_end[1]], 1)
    if user_is_drawing is True:
        # Rubber-band line from the last placed point to the cursor.
        if len(points_array) > 1:
            pygame.draw.line(screen, WHITE,
                             [previous_point[0][0], previous_point[0][1]],
                             [cursor_position[0], cursor_position[1]], 1)
        elif len(lines_array) >= 0:
            pygame.draw.line(screen, WHITE,
                             [previous_point[0], previous_point[1]],
                             [cursor_position[0], cursor_position[1]], 1)
    # Tiny green dot marking the screen center. NOT included in the saved
    # programmatic object file -- it is a measuring aid only.
    pygame.draw.rect(screen, GREEN, [(SCREEN_CENTER_X - 1), (SCREEN_CENTER_Y - 1), 2, 2])
    # Green 200x200 hitbox outline, drawn last (after user content) so it
    # stays on top, alongside the center dot.
    box_left = SCREEN_CENTER_X - 100
    box_right = SCREEN_CENTER_X + 100
    box_top = SCREEN_CENTER_Y - 100
    box_bottom = SCREEN_CENTER_Y + 100
    corners = [[box_left, box_top], [box_right, box_top],
               [box_right, box_bottom], [box_left, box_bottom]]
    for corner_a, corner_b in zip(corners, corners[1:] + corners[:1]):
        pygame.draw.line(screen, GREEN, corner_a, corner_b, 1)
    pygame.display.flip()
def handle_keys():
    '''
    Interpret pressed keys as input commands
    and execute them, mostly via state changes.

    Key bindings:
      esc -- quit without saving
      r   -- cancel the line currently being drawn
      q   -- begin/end a line (same as left clicking)
      g   -- remove the most recently completed line
      s   -- rescale, tidy and save the design, then quit
    '''
    # Refactoring note (original author): this global-state style could
    # become a GameState-esque object that accepts and hands off
    # parameters instead of mutating module globals.
    global previous_point
    global keep_window_open
    global user_is_drawing
    global user_is_currently_adding_a_line
    global lines_array
    for event in pygame.event.get():  # NOTE: This does not seem to allow for continuously-held keys being re-read if another key is pressed and released during the first key's held period.
        if event.type == pygame.QUIT:
            # BUG FIX: 'sys.exit' was referenced but never called, so the
            # window's close button silently did nothing.
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            # events and KEYDOWN prevent multiple firings from holding down the button.
            if event.key == pygame.K_ESCAPE:
                keep_window_open = False
            # (Older point-placement mode removed; point-pair lines only now.)
            if event.key == pygame.K_r:
                # Cancels drawing mode.
                user_is_currently_adding_a_line = False
                user_is_drawing = False
                previous_point = [0, 0]
            if event.key == pygame.K_q:
                # Then the user is beginning or ending a line.
                if user_is_currently_adding_a_line is True:
                    # Ending a line
                    add_previous_point_and_current_point_to_lines_array_as_a_line()
                    previous_point = [0, 0]
                    # Also reset inside add_..._a_line(); redundant but safe.
                    user_is_currently_adding_a_line = False
                    user_is_drawing = False
                else:
                    # Beginning a line
                    user_is_currently_adding_a_line = True
                    user_is_drawing = True
                    previous_point[0] = cursor_position[0]
                    previous_point[1] = cursor_position[1]
            if event.key == pygame.K_g:
                # Then the user is removing the last completed line.
                if len(lines_array) > 0:
                    lines_array.pop()
            if event.key == pygame.K_s:
                # Save: rescale to the game's coordinate system, snap
                # near-coincident endpoints, write to disk, then quit.
                random_code = random.randint(0, 1000000)
                generated_filename = str(len(lines_array)) + '-line programmatic object -- randcode ' + str(random_code) + '.txt'
                if len(lines_array) >= 1:
                    for each_line_index in range(0, (len(lines_array))):
                        # IMPORTANT: this rescaling only targets the scaling
                        # system of the Asteroids! test game, which maps
                        # object space onto a 20x20 grid (-10..+10 on both
                        # axes) via (object_size / 20); dividing screen
                        # pixels by 10 here fits them to that metric.
                        # That centered system is conceptually useful for
                        # things like radius-based collision detection.
                        # start X
                        lines_array[each_line_index][0][0] = ((SCREEN_CENTER_X - lines_array[each_line_index][0][0]) / 10)
                        # start Y
                        lines_array[each_line_index][0][1] = ((SCREEN_CENTER_Y - lines_array[each_line_index][0][1]) / 10)
                        # end X
                        lines_array[each_line_index][1][0] = ((SCREEN_CENTER_X - lines_array[each_line_index][1][0]) / 10)
                        # end Y
                        lines_array[each_line_index][1][1] = ((SCREEN_CENTER_Y - lines_array[each_line_index][1][1]) / 10)
                # If the end point of one line is close to the start point
                # of the next, split the difference so they meet exactly.
                # (Assumes the design is a closed loop of consecutive
                # lines: line 0's start pairs with the last line's end.)
                for each_line_index in range(0, (len(lines_array))):
                    # Special case of the first and last points:
                    if each_line_index == 0:
                        start_x_of_current_line = lines_array[each_line_index][0][0]
                        end_x_of_previous_line = lines_array[(len(lines_array) - 1)][1][0]
                        start_y_of_current_line = lines_array[each_line_index][0][1]
                        end_y_of_previous_line = lines_array[(len(lines_array) - 1)][1][1]
                    else:
                        start_x_of_current_line = lines_array[each_line_index][0][0]
                        end_x_of_previous_line = lines_array[(each_line_index - 1)][1][0]
                        start_y_of_current_line = lines_array[each_line_index][0][1]
                        end_y_of_previous_line = lines_array[(each_line_index - 1)][1][1]
                    # X
                    if ( (abs(start_x_of_current_line - end_x_of_previous_line)) <= 0.4 ):
                        # NOTE(review): adding half to start and subtracting
                        # half from end only moves the points together when
                        # start < end; otherwise it widens the gap. Kept
                        # as-is to preserve existing saved output -- confirm
                        # the intended behavior.
                        difference_between_them = (abs(start_x_of_current_line - end_x_of_previous_line))
                        half_of_the_difference = (difference_between_them / 2)
                        start_x_of_current_line += half_of_the_difference
                        end_x_of_previous_line -= half_of_the_difference
                        # NOTE(review): despite the original "round to the
                        # nearest tenth" comment, (v * 10) // 10 floors to a
                        # whole number -- kept as-is to preserve output.
                        start_x_of_current_line *= 10
                        start_x_of_current_line = start_x_of_current_line // 10
                        end_x_of_previous_line *= 10
                        end_x_of_previous_line = end_x_of_previous_line // 10
                    # Y (same treatment as X)
                    if ( (abs(start_y_of_current_line - end_y_of_previous_line)) <= 0.4 ):
                        difference_between_them = (abs(start_y_of_current_line - end_y_of_previous_line))
                        half_of_the_difference = (difference_between_them / 2)
                        start_y_of_current_line += half_of_the_difference
                        end_y_of_previous_line -= half_of_the_difference
                        start_y_of_current_line *= 10
                        start_y_of_current_line = start_y_of_current_line // 10
                        end_y_of_previous_line *= 10
                        end_y_of_previous_line = end_y_of_previous_line // 10
                    # Write the adjusted values back into lines_array.
                    if each_line_index == 0:
                        lines_array[each_line_index][0][0] = start_x_of_current_line
                        lines_array[(len(lines_array) - 1)][1][0] = end_x_of_previous_line
                        lines_array[each_line_index][0][1] = start_y_of_current_line
                        lines_array[(len(lines_array) - 1)][1][1] = end_y_of_previous_line
                    else:
                        lines_array[each_line_index][0][0] = start_x_of_current_line
                        lines_array[(each_line_index - 1)][1][0] = end_x_of_previous_line
                        lines_array[each_line_index][0][1] = start_y_of_current_line
                        lines_array[(each_line_index - 1)][1][1] = end_y_of_previous_line
                write_something_to_a_text_file(generated_filename, str(lines_array))
                keep_window_open = False
# # # # Initializations # # # #
screen = pygame.display.set_mode(SCREEN_SIZE)
# Drawing state: user_is_drawing shows the rubber-band preview line;
# user_is_currently_adding_a_line means a start point has been placed.
user_is_drawing = False
user_is_currently_adding_a_line = False
# Click-debounce flag; cleared periodically in the main loop.
user_recently_clicked = False
# Legacy point-chain storage, seeded with a [0, 0] placeholder.
points_array = [[0, 0]]
# Completed lines as [[start_x, start_y], [end_x, end_y]] pairs.
lines_array = []
# Start point of the line currently being drawn ([0, 0] when idle).
previous_point = [0, 0]
# To keep the game running
keep_window_open = True
# Create a clock object to make the game run at a specified speed in the main loop
clock = pygame.time.Clock()
# Using the game_ticker model is currently necessary to decouple program running speed from pygame's Clock function. There's probably a better way to do this somewhere... This is fairly simple, though.
game_ticker = 0
# # # # Main Loop # # # #
while keep_window_open is True:
    cursor_position = cursor_x, cursor_y = pygame.mouse.get_pos()
    button1_pressed, button2_pressed, button3_pressed = pygame.mouse.get_pressed()
    # Process keyboard input
    handle_keys()
    # Event progression metering: cap the loop at 40 ticks/second and
    # advance the debounce ticker, wrapping it at 80.
    clock.tick(40)
    if game_ticker < 80:
        game_ticker += 1
    elif game_ticker >= 80:
        game_ticker = 0
    # (Older point-placement mode removed; point-pair lines only now.)
    # Periodically clear the click-debounce flag so the next click can
    # register -- duct tape over click oversensitivity, but it works.
    if ( (user_recently_clicked is True) and ((game_ticker % 30) == 1) ):
        user_recently_clicked = False
    if ((game_ticker % 1) == 0):
        if ((button1_pressed is True) and (user_recently_clicked is False)):
            if user_is_currently_adding_a_line is True:
                # Ending a line
                add_previous_point_and_current_point_to_lines_array_as_a_line()
                previous_point = [0, 0]
                # Also reset inside add_..._a_line(); redundant but safe.
                user_is_currently_adding_a_line = False
                user_is_drawing = False
                user_recently_clicked = True
            else:
                # Beginning a line
                user_is_currently_adding_a_line = True
                user_is_drawing = True
                previous_point[0] = cursor_position[0]
                previous_point[1] = cursor_position[1]
                user_recently_clicked = True
    if button3_pressed is True:
        # Right mouse click cancels drawing mode.
        user_is_currently_adding_a_line = False
        user_is_drawing = False
        previous_point = [0, 0]
    # Display everything that needs to be displayed
    render_all()
# BUG FIX: 'pygame.quit' was referenced but never called; the parentheses
# make the "IDLE friendly" shutdown actually run.
pygame.quit()
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: make CfiStoreItem.item a unique OneToOneField to Product.

        The column type is altered first, then the unique constraint is
        added on the already-converted column.
        """

        # Changing field 'CfiStoreItem.item'
        db.alter_column(u'catalog_cfistoreitem', 'item_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['catalog.Product'], unique=True))
        # Adding unique constraint on 'CfiStoreItem', fields ['item']
        db.create_unique(u'catalog_cfistoreitem', ['item_id'])
def backwards(self, orm):
# Removing unique constraint on 'CfiStoreItem', fields ['item']
db.delete_unique(u'catalog_cfistoreitem', ['item_id'])
# Changing field 'CfiStoreItem.item'
db.alter_column(u'catalog_cfistoreitem', 'item_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Product']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'catalog.abstractlike': {
'Meta': {'object_name': 'AbstractLike', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'liked_time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.abstracttop': {
'Meta': {'object_name': 'AbstractTop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'recorded_time': ('django.db.models.fields.DateTimeField', [], {})
},
'catalog.basemodel': {
'Meta': {'object_name': 'BaseModel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'liker': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfiItemLikes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiItem']", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'})
},
'catalog.likecfiitem': {
'Meta': {'object_name': 'LikeCfiItem', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'cfi_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"})
},
'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"})
},
'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproducttutorial': {
'Meta': {'object_name': 'LikeProductTutorial', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.list': {
'Meta': {'object_name': 'List', '_ormbases': ['catalog.BaseModel']},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.location': {
'Meta': {'object_name': 'Location', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.note': {
'Meta': {'object_name': 'Note', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['catalog']
| |
import datetime
import hashlib
import json
from bson.objectid import ObjectId
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from crits.core.class_mapper import class_from_id, class_from_value
from crits.core.crits_mongoengine import EmbeddedSource
from crits.core.crits_mongoengine import create_embedded_source, json_handler
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import is_admin, user_sources
from crits.core.user_tools import is_user_subscribed
from crits.certificates.certificate import Certificate
from crits.notifications.handlers import remove_user_from_notification
from crits.services.analysis_result import AnalysisResult
from crits.services.handlers import run_triage, get_supported_services
from crits.vocabulary.relationships import RelationshipTypes
def generate_cert_csv(request):
    """
    Generate a CSV file of the Certificate information.

    Thin wrapper around the core :func:`csv_export` handler, bound to the
    :class:`Certificate` collection.

    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    return csv_export(request, Certificate)
def get_certificate_details(md5, analyst):
    """
    Generate the data to render the Certificate details template.

    :param md5: The MD5 of the Certificate to get details for.
    :type md5: str
    :param analyst: The user requesting this information.
    :type analyst: str
    :returns: template (str or None on success), arguments (dict)
    """
    username = "%s" % analyst
    # Only certificates from sources the analyst may see are visible.
    cert = Certificate.objects(md5=md5,
                               source__name__in=user_sources(analyst)).first()
    if not cert:
        return ("error.html",
                {'error': 'Certificate not yet available or you do not have access to view it.'})

    cert.sanitize(username)
    # Clear any pending notifications this user has for this certificate.
    remove_user_from_notification(username, cert.id, 'Certificate')

    args = {
        'service_list': get_supported_services('Certificate'),
        'objects': cert.sort_objects(),
        'relationships': cert.sort_relationships(username, meta=True),
        'comments': {'comments': cert.get_comments(),
                     'url_key': md5},
        'relationship': {'type': 'Certificate',
                         'value': cert.id},
        'subscription': {'type': 'Certificate',
                         'id': cert.id,
                         'subscribed': is_user_subscribed(username,
                                                          'Certificate',
                                                          cert.id)},
        'screenshots': cert.get_screenshots(analyst),
        'service_results': cert.get_analysis_results(),
        'cert': cert,
    }
    # template is None on success; the caller falls back to the default
    # details template in that case.
    return None, args
def generate_cert_jtable(request, option):
    """
    Generate the jtable data for rendering in the list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = Certificate
    type_ = "certificate"
    # jtable rendering options are declared on the document class itself.
    mapper = obj_type._meta['jtable_opts']
    if option == "jtlist":
        # AJAX listing request: return the rows as JSON.
        # Sets display url
        details_url = mapper['details_url']
        details_url_key = mapper['details_url_key']
        fields = mapper['fields']
        response = jtable_ajax_list(obj_type,
                                    details_url,
                                    details_url_key,
                                    request,
                                    includes=fields)
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    if option == "jtdelete":
        # AJAX delete request: report OK only if the delete succeeded.
        response = {"Result": "ERROR"}
        if jtable_ajax_delete(obj_type,request):
            response = {"Result": "OK"}
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    # Any other option renders the jtable page itself; build the config.
    jtopts = {
        'title': "Certificates",
        'default_sort': mapper['default_sort'],
        'listurl': reverse('crits.%ss.views.%ss_listing' % (type_,
                                                            type_),
                           args=('jtlist',)),
        'deleteurl': reverse('crits.%ss.views.%ss_listing' % (type_,
                                                              type_),
                             args=('jtdelete',)),
        'searchurl': reverse(mapper['searchurl']),
        'fields': mapper['jtopts_fields'],
        'hidden_fields': mapper['hidden_fields'],
        'linked_fields': mapper['linked_fields'],
        'details_link': mapper['details_link'],
        'no_sort': mapper['no_sort']
    }
    jtable = build_jtable(jtopts,request)
    # Toolbar buttons: status filters plus an "Add" shortcut. The values are
    # JavaScript snippets emitted verbatim into the page, hence the nested
    # quoting.
    jtable['toolbar'] = [
        {
            'tooltip': "'All Certificates'",
            'text': "'All'",
            'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'New Certificates'",
            'text': "'New'",
            'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'In Progress Certificates'",
            'text': "'In Progress'",
            'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Analyzed Certificates'",
            'text': "'Analyzed'",
            'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Deprecated Certificates'",
            'text': "'Deprecated'",
            'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Add Certificate'",
            'text': "'Add Certificate'",
            'click': "function () {$('#new-certificate').click()}",
        },
    ]
    if option == "inline":
        # Embedded in another page (e.g. a dashboard tab).
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_,
                                   'button' : '%ss_tab' % type_},
                                  RequestContext(request))
    else:
        # Stand-alone listing page.
        return render_to_response("%s_listing.html" % type_,
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_},
                                  RequestContext(request))
def handle_cert_file(filename, data, source_name, user=None,
                     description=None, related_md5=None, method='',
                     reference='', relationship=None, bucket_list=None,
                     ticket=None, related_id=None, related_type=None,
                     relationship_type=None):
    """
    Add a Certificate.

    :param filename: The filename of the Certificate.
    :type filename: str
    :param data: The filedata of the Certificate.
    :type data: str
    :param source_name: The source which provided this Certificate.
    :type source_name: str,
                       :class:`crits.core.crits_mongoengine.EmbeddedSource`,
                       list of :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param user: The user adding the Certificate.
    :type user: str
    :param description: Description of the Certificate.
    :type description: str
    :param related_md5: MD5 of a top-level object related to this Certificate.
    :type related_md5: str
    :param related_type: The CRITs type of the related top-level object.
    :type related_type: str
    :param method: The method of acquiring this Certificate.
    :type method: str
    :param reference: A reference to the source of this Certificate.
    :type reference: str
    :param relationship: The relationship between the parent and the Certificate.
    :type relationship: str
    :param bucket_list: Bucket(s) to add to this Certificate
    :type bucket_list: str(comma separated) or list.
    :param ticket: Ticket(s) to add to this Certificate
    :type ticket: str(comma separated) or list.
    :param related_id: ID of object to create relationship with
    :type related_id: str
    :param related_type: Type of object to create relationship with
    :type related_type: str
    :param relationship_type: Type of relationship to create.
    :type relationship_type: str
    :returns: dict with keys:
              'success' (boolean),
              'message' (str),
              'md5' (str) if successful.
    """
    # --- input validation ------------------------------------------------
    if not data:
        status = {
            'success':   False,
            'message':  'No data object passed in'
        }
        return status
    if len(data) <= 0:
        status = {
            'success':   False,
            'message':  'Data length <= 0'
        }
        return status
    # A related TLO must be specified by both its type AND an identifier
    # (id or md5) — reject half-specified relationships.
    if ((related_type and not (related_id or related_md5)) or
        (not related_type and (related_id or related_md5))):
        status = {
            'success':   False,
            'message':  'Must specify both related_type and related_id or related_md5.'
        }
        return status

    # --- resolve the related top-level object, if any --------------------
    related_obj = None
    if related_id or related_md5:
        if related_id:
            related_obj = class_from_id(related_type, related_id)
        else:
            related_obj = class_from_value(related_type, related_md5)
        if not related_obj:
            status = {
                'success': False,
                'message': 'Related object not found.'
            }
            return status

    # generate md5 and timestamp
    md5 = hashlib.md5(data).hexdigest()
    timestamp = datetime.datetime.now()

    # generate Certificate (reuse the existing document if this md5 is known;
    # re-uploading then only merges sources/buckets/tickets)
    cert = Certificate.objects(md5=md5).first()
    if not cert:
        cert = Certificate()
        cert.filename = filename
        cert.created = timestamp
        cert.size = len(data)
        cert.description = description
        cert.md5 = md5

    # generate source information and add to certificate
    # (source_name may be a name, a single EmbeddedSource, or a list of them)
    if isinstance(source_name, basestring) and len(source_name) > 0:
        s = create_embedded_source(source_name,
                                   method=method,
                                   reference=reference,
                                   analyst=user)
        cert.add_source(s)
    elif isinstance(source_name, EmbeddedSource):
        cert.add_source(source_name, method=method, reference=reference)
    elif isinstance(source_name, list) and len(source_name) > 0:
        for s in source_name:
            if isinstance(s, EmbeddedSource):
                cert.add_source(s, method=method, reference=reference)

    if bucket_list:
        cert.add_bucket_list(bucket_list, user)

    if ticket:
        cert.add_ticket(ticket, user)

    # add file to GridFS (only if no file data is attached yet)
    if not isinstance(cert.filedata.grid_id, ObjectId):
        cert.add_file_data(data)

    # save cert
    cert.save(username=user)
    cert.reload()

    # run certificate triage, but only on first upload (no prior results)
    if len(AnalysisResult.objects(object_id=str(cert.id))) < 1 and data:
        run_triage(cert, user)

    # update relationship if a related top-level object is supplied
    if related_obj and cert:
        if relationship_type:
            # inverse(): the stored relationship is certificate -> parent.
            relationship=RelationshipTypes.inverse(relationship=relationship_type)
        if not relationship:
            relationship = RelationshipTypes.RELATED_TO
        cert.add_relationship(related_obj,
                              relationship,
                              analyst=user,
                              get_rels=False)
        cert.save(username=user)

    status = {
        'success': True,
        'message': 'Uploaded certificate',
        'md5': md5,
        'id': str(cert.id),
        'object': cert
    }

    return status
def delete_cert(md5, username=None):
    """
    Delete a Certificate.

    Only administrators may delete; non-admins and unknown MD5s both
    result in a False return.

    :param md5: The MD5 of the Certificate to delete.
    :type md5: str
    :param username: The user deleting the certificate.
    :type username: str
    :returns: True, False
    """
    if not is_admin(username):
        return False
    cert = Certificate.objects(md5=md5).first()
    if not cert:
        return False
    cert.delete(username=username)
    return True
| |
from panda3d.core import *
from panda3d.direct import *
from direct.interval.IntervalGlobal import *
from direct.distributed import ClockDelta
from toontown.toonbase.ToonPythonUtil import lerp
import math
from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import NodePath
from direct.task import Task
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from direct.distributed import DistributedNode
from direct.showbase import PythonUtil
from otp.avatar import ShadowCaster
import random
from otp.otpbase import OTPGlobals
from toontown.estate import GardenGlobals
def recurseParent(intoNode, ParentName):
    # Walk up the scene graph from intoNode, returning 1 if an ancestor
    # named ParentName is found before hitting 'render' (or running out of
    # parents), and 0 otherwise.
    node = intoNode.getParent(0)
    while node and node.getName() != 'render':
        if node.getName() == ParentName:
            return 1
        node = node.getParent(0)
    return 0
class DistributedLawnDecor(DistributedNode.DistributedNode, NodePath, ShadowCaster.ShadowCaster):
    """Client-side distributed object for an item of lawn/garden decor on an
    estate (garden plots, plants, statuary and the like).

    Responsibilities visible here: loading a model under a rotate node,
    attaching a drop shadow, snapping the decor (and its movie stand point)
    to the terrain, maintaining a collision sphere/tube that reports plot
    enter/exit, and running the planting / dig-up movie sequences.
    Subclasses override doModelSetup(), loadModel() and the capability
    queries (canBePicked, canBeWatered, canBePlanted, ...).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawnDecor')

    def __init__(self, cr):
        DistributedNode.DistributedNode.__init__(self, cr)
        NodePath.__init__(self, 'decor')
        ShadowCaster.ShadowCaster.__init__(self, False)
        # plantPath holds rotateNode/model so the visuals can be rotated
        # independently of this node's own distributed transform.
        self.plantPath = NodePath('plantPath')
        self.plantPath.reparentTo(self)
        # Placeholder model; only actually loaded under __dev__ (see loadModel).
        self.defaultModel = 'phase_9/models/cogHQ/woodCrateB'
        self.messageName = None
        self.model = None
        self.colSphereNode = None
        self.rotateNode = None
        # Collision shape parameters consumed by setupCollision(): a sphere
        # when collSphereOffset <= 0.1, otherwise a tube of that half-length.
        self.collSphereOffset = 0.0
        self.collSphereRadius = 1.0
        # Extra Z offset applied when snapping to the ground.
        self.stickUp = 0.0
        # Point where the toon stands during plant/dig movies.
        self.movieNode = None
        self.shadowJoint = None
        self.shadowScale = 1
        # Set while waiting for the server to replace this object after dig-up.
        self.expectingReplacement = 0
        self.movie = None
        return

    def setHeading(self, h):
        # Distributed setter for heading only (see also setH below).
        self.notify.debug('setting h')
        DistributedNode.DistributedNode.setH(self, h)

    def generateInit(self):
        self.notify.debug('generateInit')
        DistributedNode.DistributedNode.generateInit(self)

    def generate(self):
        self.notify.debug('generate')
        # Decor lives directly under render.
        self.reparentTo(render)
        DistributedNode.DistributedNode.generate(self)

    def announceGenerate(self):
        """All required fields are in; build the visuals and collisions."""
        self.notify.debug('announceGenerate')
        DistributedNode.DistributedNode.announceGenerate(self)
        self.doModelSetup()
        self.loadModel()
        self.setupShadow()
        self.makeMovieNode()
        self.stick2Ground()
        self.setupCollision()

    def doModelSetup(self):
        # Hook for subclasses to configure model/collision parameters
        # before loadModel() runs.
        pass

    def disable(self):
        self.notify.debug('disable')
        self.finishMovies()
        self.handleExitPlot()
        self.ignoreAll()
        DistributedNode.DistributedNode.disable(self)
        if hasattr(self, 'nodePath'):
            self.nodePath.detachNode()

    def delete(self):
        self.notify.debug('delete')
        ShadowCaster.ShadowCaster.delete(self)
        self.unloadModel()
        DistributedNode.DistributedNode.delete(self)

    def loadModel(self):
        """Create the rotate node and (in dev builds only) a crate placeholder.

        Subclasses are expected to override this to load the real model.
        """
        if not self.rotateNode:
            self.rotateNode = self.plantPath.attachNewNode('rotate')
        self.model = None
        if __dev__:
            self.model = loader.loadModel(self.defaultModel)
            self.model.setScale(0.4, 0.4, 0.1)
            self.model.reparentTo(self.rotateNode)
        return

    def setupShadow(self):
        # Drop shadow hangs off the rotate node so it follows the model.
        self.shadowJoint = self.rotateNode.attachNewNode('shadow')
        self.initializeDropShadow(False)
        self.shadowJoint.setScale(self.shadowScale)
        self.setActiveShadow()

    def makeMovieNode(self):
        # Stand point for the toon during movies, just in front of the decor.
        self.movieNode = self.rotateNode.attachNewNode('moviePos')
        self.movieNode.setPos(0, -3, 0)

    def setupCollision(self):
        """Attach an intangible collision solid that reports plot enter/exit."""
        self.messageName = self.uniqueName('enterplotSphere')
        self.messageStartName = self.uniqueName('plotSphere')
        self.exitMessageName = self.uniqueName('exitplotSphere')
        # Small offsets get a sphere; larger ones a vertical-ish tube.
        if self.collSphereOffset <= 0.1:
            colSphere = CollisionSphere(0, 0, 0, self.collSphereRadius)
        else:
            colSphere = CollisionTube(0, -self.collSphereOffset, 0, 0, self.collSphereOffset, 0, self.collSphereRadius)
        colSphere.setTangible(0)
        colNode = CollisionNode(self.messageStartName)
        colNode.addSolid(colSphere)
        colSphereNode = self.attachNewNode(colNode)
        self.colSphereNode = colSphereNode
        self.accept(self.messageName, self.handleEnterPlot)
        self.accept(self.exitMessageName, self.handleExitPlot)

    def handleEnterPlot(self, optional = None):
        # Tell the AI the local toon stepped onto this plot.
        self.notify.debug('handleEnterPlot %d' % self.doId)
        self.sendUpdate('plotEntered', [])

    def handleExitPlot(self, optional = None):
        # Only end the interaction if this decor is the one being used.
        if base.localAvatar.inGardenAction == self:
            base.localAvatar.handleEndPlantInteraction(self, replacement=self.expectingReplacement)

    def handleWatering(self):
        self.handleExitPlot()
        base.localAvatar.removeShovelRelatedDoId(self.doId)

    def unloadModel(self):
        """Tear down the model and any pending ground-sticking task."""
        if self.model:
            self.model.removeNode()
            del self.model
            self.model = None
        if hasattr(self, 'nodePath') and self.nodePath:
            self.nodePath.removeNode()
            self.nodePath = None
        # NOTE(review): 'adjust tree' task appears to be scheduled by a
        # subclass; removed here defensively.
        taskMgr.remove(self.uniqueName('adjust tree'))
        return

    def setPos(self, x, y, z):
        # Re-snap to the terrain whenever the position changes.
        DistributedNode.DistributedNode.setPos(self, x, y, z)
        self.stick2Ground()

    def setPosition(self, x, y, z):
        # Distributed alias for setPos.
        DistributedNode.DistributedNode.setPos(self, x, y, z)
        self.stick2Ground()

    def stick2Ground(self, taskfooler = 0):
        """Snap this decor (and its movie node) onto the estate terrain.

        Casts a downward collision ray against the floor mask; if the
        terrain ('terrain_DNARoot') is not loaded yet, retries once per
        second via doMethodLater.  Always returns Task.done so it can be
        used directly as a task callback.
        """
        if self.isEmpty():
            return Task.done
        testPath = NodePath('testPath')
        testPath.reparentTo(render)
        # Ray starts far above the world and points straight down.
        cRay = CollisionRay(0.0, 0.0, 40000.0, 0.0, 0.0, -1.0)
        cRayNode = CollisionNode(self.uniqueName('estate-FloorRay'))
        cRayNode.addSolid(cRay)
        cRayNode.setFromCollideMask(OTPGlobals.FloorBitmask)
        cRayNode.setIntoCollideMask(BitMask32.allOff())
        cRayNodePath = testPath.attachNewNode(cRayNode)
        queue = CollisionHandlerQueue()
        picker = CollisionTraverser()
        picker.addCollider(cRayNodePath, queue)
        if self.movieNode:
            # First pass: drop the movie stand point onto the terrain.
            testPath.setPos(self.movieNode.getX(render), self.movieNode.getY(render), 0)
            picker.traverse(render)
            if queue.getNumEntries() > 0:
                queue.sortEntries()
                for index in range(queue.getNumEntries()):
                    entry = queue.getEntry(index)
                    if recurseParent(entry.getIntoNode(), 'terrain_DNARoot'):
                        self.movieNode.setZ(entry.getSurfacePoint(self)[2])

        # Second pass: drop the decor itself onto the terrain.
        testPath.setPos(self.getX(), self.getY(), 0)
        picker.traverse(render)
        if queue.getNumEntries() > 0:
            queue.sortEntries()
            for index in range(queue.getNumEntries()):
                entry = queue.getEntry(index)
                if recurseParent(entry.getIntoNode(), 'terrain_DNARoot'):
                    # Lift slightly above the surface to avoid z-fighting.
                    self.setZ(entry.getSurfacePoint(render)[2] + self.stickUp + 0.1)
                    self.stickParts()
                    return Task.done
        # Terrain not found yet; try again shortly.
        # NOTE(review): uses global uniqueName (not self.uniqueName) for the
        # retry task name — presumably the showbase helper; confirm.
        taskMgr.doMethodLater(1.0, self.stick2Ground, uniqueName('groundsticker'))
        return Task.done

    def stickParts(self):
        # Hook for subclasses that need to reposition sub-parts after snapping.
        pass

    def setPlot(self, plot):
        self.plot = plot

    def setH(self, h):
        DistributedNode.DistributedNode.setH(self, h)

    def getPlot(self):
        return self.plot

    def setOwnerIndex(self, index):
        # Index into the estate's avatar id list identifying the owner.
        self.ownerIndex = index

    def getOwnerIndex(self):
        return self.ownerIndex

    def getOwnerId(self):
        """Resolve ownerIndex against the estate's idList; 0 if unavailable."""
        retval = 0
        estate = base.cr.doFind('DistributedEstate')
        if estate and hasattr(estate, 'idList') and estate.idList:
            if self.ownerIndex < len(estate.idList):
                retval = estate.idList[self.ownerIndex]
        return retval

    def canBePicked(self):
        # Only the owning avatar may pick this decor.
        retval = True
        self.notify.debug('base.localAvatar.doId : %s' % base.localAvatar.doId)
        self.notify.debug('self.getOwnerId : %s ' % self.getOwnerId())
        self.notify.debug("statue's DoId : %s " % self.doId)
        if not hasattr(base, 'localAvatar') or not base.localAvatar.doId == self.getOwnerId():
            retval = False
        return retval

    def allowedToPick(self):
        return True

    def unlockPick(self):
        return True

    def handleRemove(self):
        """Start the remove (dig-up) flow for the owning avatar."""
        if not self.canBePicked():
            self.notify.debug("I don't own this item, just returning")
            return
        base.localAvatar.hideShovelButton()
        base.localAvatar.hideWateringCanButton()
        self.startInteraction()
        self.sendUpdate('removeItem', [])

    def generateToonMoveTrack(self, toon):
        """Build an interval that walks *toon* to the movie stand point,
        facing this decor, then returns it to a neutral pose."""
        node = NodePath('tempNode')
        # Direction from decor to toon, flattened to the ground plane.
        displacement = Vec3(toon.getPos(render) - self.getPos(render))
        displacement.setZ(0)
        displacement.normalize()
        movieDistance = self.movieNode.getDistance(self.rotateNode)
        displacement *= movieDistance
        node.reparentTo(render)
        node.setPos(displacement + self.getPos(render))
        node.lookAt(self)
        # Choose the shortest rotation towards the target heading.
        heading = PythonUtil.fitDestAngle2Src(toon.getH(render), node.getH(render))
        hpr = toon.getHpr(render)
        hpr.setX(heading)
        finalX = node.getX(render)
        finalY = node.getY(render)
        finalZ = node.getZ(render)
        node.removeNode()
        toonTrack = Sequence(Parallel(ActorInterval(toon, 'walk', loop=True, duration=1), Parallel(LerpPosInterval(toon, 1.0, Point3(finalX, finalY, toon.getZ(render)), fluid=True, bakeInStart=False)), LerpHprInterval(toon, 1.0, hpr=hpr)), Func(toon.loop, 'neutral'))
        return toonTrack

    def startInteraction(self):
        # Mark the local toon as busy with this decor.
        place = base.cr.playGame.getPlace()
        if place:
            place.detectedGardenPlotUse()
        base.localAvatar.setInGardenAction(self)

    def finishInteraction(self):
        if hasattr(base.cr.playGame.getPlace(), 'detectedGardenPlotDone'):
            base.cr.playGame.getPlace().detectedGardenPlotDone()
            self.notify.debug('done interaction')
        else:
            self.notify.warning('base.cr.playGame.getPlace() does not have detectedGardenPlotDone')
        if hasattr(base, 'localAvatar'):
            base.localAvatar.handleEndPlantInteraction(self)

    def startCamIval(self, avId):
        # Camera moves only apply to the local avatar.
        track = Sequence()
        if avId == localAvatar.doId:
            track = Sequence(Func(base.localAvatar.disableSmartCameraViews), Func(base.localAvatar.setCameraPosForPetInteraction))
        return track

    def stopCamIval(self, avId):
        track = Sequence()
        if avId == localAvatar.doId:
            track = Sequence(Func(base.localAvatar.unsetCameraPosForPetInteraction), Wait(0.8), Func(base.localAvatar.enableSmartCameraViews))
        return track

    def canBeWatered(self):
        return 0

    def getShovelAction(self):
        return None

    def getShovelCommand(self):
        return None

    def canBePlanted(self):
        return 0

    def movieDone(self):
        # Notify the AI that the client-side movie finished.
        self.sendUpdate('movieDone', [])

    def setMovie(self, mode, avId):
        """Distributed entry point: play the movie requested by the AI."""
        if mode == GardenGlobals.MOVIE_FINISHPLANTING:
            self.doFinishPlantingTrack(avId)
        elif mode == GardenGlobals.MOVIE_REMOVE:
            self.doDigupTrack(avId)

    def finishMovies(self):
        # Jump any running movie to its end state before starting a new one.
        if self.movie:
            self.movie.finish()
            self.movie = None
        return

    def doDigupTrack(self, avId):
        """Play the dig-up movie: walk over, dig, fade the model out."""
        toon = base.cr.doId2do.get(avId)
        if not toon:
            return
        self.finishMovies()
        self.model.setTransparency(1)
        self.model.setAlphaScale(1)
        shovel = toon.attachShovel()
        shovel.hide()
        moveTrack = self.generateToonMoveTrack(toon)
        digupTrack = self.generateDigupTrack(toon)
        self.movie = Sequence(self.startCamIval(avId), moveTrack, Func(shovel.show), digupTrack)
        if avId == localAvatar.doId:
            # The server will replace this object once removal completes.
            self.expectingReplacement = 1
            self.movie.append(Func(self.movieDone))
        self.movie.start()

    def generateDigupTrack(self, toon):
        """Build the dig animation/sound interval that fades the model out."""
        sound = loader.loadSfx('phase_5.5/audio/sfx/burrow.ogg')
        sound.setPlayRate(0.5)
        pos = self.model.getPos()
        pos.setZ(pos[2] - 1)
        track = Parallel()
        track.append(Sequence(ActorInterval(toon, 'start-dig'), Parallel(ActorInterval(toon, 'loop-dig', loop=1, duration=5.13), Sequence(Wait(0.25), SoundInterval(sound, node=toon, duration=0.55), Wait(0.8), SoundInterval(sound, node=toon, duration=0.55), Wait(1.35), SoundInterval(sound, node=toon, duration=0.55))), ActorInterval(toon, 'start-dig', playRate=-1), LerpFunc(self.model.setAlphaScale, fromData=1, toData=0, duration=1), Func(toon.loop, 'neutral'), Func(toon.detachShovel)))
        return track

    def doFinishPlantingTrack(self, avId):
        """Play the finish-planting movie: fade the model in, clean up."""
        toon = base.cr.doId2do.get(avId)
        if not toon:
            return
        self.finishMovies()
        self.movie = Sequence()
        if avId == localAvatar.doId:
            self.startInteraction()
        if self.model:
            self.model.setTransparency(1)
            self.model.setAlphaScale(0)
            self.movie.append(LerpFunc(self.model.setAlphaScale, fromData=0, toData=1, duration=3))
        self.movie.append(self.stopCamIval(avId))
        self.movie.append(Func(toon.detachShovel))
        self.movie.append(Func(toon.loop, 'neutral'))
        if avId == localAvatar.doId:
            self.movie.append(Func(self.finishInteraction))
            self.movie.append(Func(self.movieDone))
            # Subclasses may define a result dialog to show afterwards.
            if hasattr(self, 'doResultDialog'):
                self.movie.append(Func(self.doResultDialog))
        self.movie.start()

    def interactionDenied(self, avId):
        if avId == localAvatar.doId:
            self.finishInteraction()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'quentingerome'
import requests
from cherrypy.lib.auth2 import require, member_of
import logging
import htpc
import cherrypy
from HTMLParser import HTMLParser
from htpc.helpers import striphttp
logger = logging.getLogger('modules.utorrent')
class AuthTokenParser(HTMLParser):
    """Extract the webui auth token from uTorrent's token.html page.

    token.html contains a single element whose text node is the token;
    feeding the page through the parser captures that text.
    """
    # Holds the most recently seen text node while parsing.
    _token = None

    def handle_data(self, data):
        # Last text node wins; token.html only contains the token text.
        self._token = data

    def token(self, html):
        """Parse *html* and return the auth token, or None if none found.

        (The old dead ``token = None`` class attribute — immediately
        shadowed by this method — has been removed.)
        """
        self._token = None
        self.feed(html)
        return self._token
fields = {
'name': 2,
'id': 0,
'status': 1,
'size': 3,
'percentage_done': 4,
'dl': 5,
'up': 6,
'dl_speed': 9,
'up_speed': 8,
'eta': 10,
'ratio': 7,
}
status = {
1: 'Started',
2: 'Checking',
4: 'Started&Checked',
8: 'Checked',
16: 'Error',
32: 'Paused',
64: 'Queued',
128: 'Loaded'
}
def _get_torrent_state(state_sum):
"""
Returns a list of all states of the torrent
:param value: int
:return: str
"""
states = []
for ps in sorted(status.keys(), reverse=True):
if not state_sum:
break
if ps <= state_sum:
states.append(ps)
state_sum -= ps
return states
def TorrentResult(values):
"""
:param values:
:type values: list
:return:
:rtype: dict
"""
def get_result(vals):
for key, idx in fields.items():
if key != 'status':
yield key, vals[idx]
else:
yield key, _get_torrent_state(vals[idx])
return dict([(k, v) for k, v in get_result(values)])
# Raised/caught locally when the uTorrent webui cannot be reached.
# NOTE(review): on Python 3 this shadows the builtin ConnectionError;
# harmless while the module is Python 2, but worth renaming on migration.
class ConnectionError(Exception):
    pass
class UTorrent(object):
    """HTPC-Manager module exposing a uTorrent webui client.

    All requests go through fetch(), which handles the webui's token-based
    authentication and transparently re-authenticates when the token is
    rejected.
    """
    _token = ''
    _cookies = None

    def __init__(self):
        self.sess = requests.Session()
        # Register this module and its settings fields with HTPC-Manager.
        htpc.MODULES.append({
            'name': 'uTorrent',
            'id': 'utorrent',
            'test': htpc.WEBDIR + 'utorrent/ping',
            'fields': [
                {'type': 'bool', 'label': 'Enable', 'name': 'utorrent_enable'},
                {'type': 'text', 'label': 'Menu name', 'name': 'utorrent_name'},
                {'type': 'text', 'label': 'IP / Host *', 'name': 'utorrent_host'},
                {'type': 'text', 'label': 'Port', 'placeholder': '8080', 'name': 'utorrent_port'},
                {'type': 'text', 'label': 'Username', 'name': 'utorrent_username'},
                {'type': 'password', 'label': 'Password', 'name': 'utorrent_password'},
                {'type': 'text', 'label': 'Reverse proxy link', 'placeholder': '', 'desc': 'Reverse proxy link, e.g. https://utorrent.domain.com', 'name': 'utorrent_reverse_proxy_link'},
            ]
        })

    @cherrypy.expose()
    @require()
    def index(self):
        """Render the uTorrent page template."""
        return htpc.LOOKUP.get_template('utorrent.html').render(scriptname='utorrent', webinterface=self.webinterface())

    def webinterface(self):
        """Return the webui URL; a configured reverse proxy link wins."""
        ip = htpc.settings.get('utorrent_host')
        port = htpc.settings.get('utorrent_port')
        url = 'http://%s:%s/gui/' % (ip, port)
        if htpc.settings.get('utorrent_reverse_proxy_link'):
            url = htpc.settings.get('utorrent_reverse_proxy_link')
        return url

    @cherrypy.tools.json_out()
    @cherrypy.expose()
    @require()
    def torrents(self):
        """Return all torrents as parsed dicts, or result 500 on failure."""
        req = self.fetch('&list=1')
        if req:
            torrents = req.json()['torrents']
            return {'torrents': [TorrentResult(tor) for tor in torrents], 'result': req.status_code}
        else:
            return {'result': 500}

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def start(self, torrent_id):
        """Start the torrent identified by its hash."""
        return self.do_action('start', hash=torrent_id).json()

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def stop(self, torrent_id):
        """Stop the torrent identified by its hash."""
        return self.do_action('stop', hash=torrent_id).json()

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def remove(self, torrent_id):
        """Remove the torrent (keeps downloaded data)."""
        return self.do_action('remove', hash=torrent_id).json()

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def remove_data(self, torrent_id):
        """Remove the torrent and its downloaded data."""
        return self.do_action('removedata', hash=torrent_id).json()

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def add_url(self, url):
        """Add a torrent by URL; returns the webui status code."""
        try:
            res = self.do_action('add-url', s=url)
            return {'result': res.status_code}
        except ConnectionError as e:
            # (was py2-only `except ConnectionError, e` syntax)
            logger.exception(e)

    @cherrypy.tools.json_out()
    @require()
    @cherrypy.expose()
    def get_speed_limit(self):
        """Return the configured max download/upload rates, if reachable."""
        r = self.do_action('getsettings')
        d = {}
        if r:
            # Each settings entry is [name, type, value, ...]; value at index 2.
            for k in r.json()['settings']:
                if 'max_dl_rate' in k:
                    d['dl'] = k[2]
                elif 'max_ul_rate' in k:
                    d['ul'] = k[2]
        return d

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def to_client(self, link, torrentname, **kwargs):
        """Send a torrent link to uTorrent (generic 'to client' hook)."""
        try:
            logger.info('Added %s to uTorrent' % torrentname)
            res = self.do_action('add-url', s=link)
            return {'result': res.status_code}
        except Exception as e:
            # (fixed 'sendt' typo in the error message)
            logger.error('Failed to send %s to uTorrent %s %s' % (link, torrentname, e))

    def change_label(self, hash, label):
        """Set the label property on the given torrent."""
        return self.do_action('setprops', hash=hash, s='label', v=label)

    @cherrypy.expose()
    @require(member_of(htpc.role_admin))
    @cherrypy.tools.json_out()
    def ping(self, utorrent_host='', utorrent_port='',
             utorrent_username='', utorrent_password='', **kwargs):
        """Connectivity test used by the settings page."""
        logger.debug("Testing uTorrent connectivity")
        res = self.fetch('&list=1', host=utorrent_host, port=utorrent_port, username=utorrent_username, password=utorrent_password)
        # fetch() returns None on failure, so guard before reading status_code
        # (the old code raised AttributeError when the host was unreachable).
        if res and res.status_code == 200:
            return True
        else:
            logger.error("Unable to contact uTorrent via " + self._get_url(utorrent_host, utorrent_port))
            return

    def do_action(self, action, hash=None, s=None, **kwargs):
        """
        Run a webui action against an optional torrent hash.

        :param action: one of the whitelisted webui actions
        :param hash: torrent hash, or None for global actions
        :param s: URL for 'add-url'
        :param kwargs: extra query parameters appended verbatim
        :rtype: requests.Response
        :return:
        :raises AttributeError: if *action* is not whitelisted
        """
        if action not in ('start', 'stop', 'pause', 'forcestart', 'unpause', 'remove', 'removedata', 'add-url', 'recheck', 'setprio',
                          'queuebottom', 'queuetop', 'queuedown', 'queueup', 'getfiles', 'getsettings', 'setsetting'):
            raise AttributeError
        if action == 'add-url':
            return self.fetch('&action=%s&s=%s' % (action, s))
        params_str = ''.join(["&%s=%s" % (k, v) for k, v in kwargs.items()])
        if hash is None:
            # getsettings
            return self.fetch('&action=%s%s' % (action, params_str))
        return self.fetch('&action=%s%s&hash=%s' % (action, params_str, hash))

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def change_speed(self, **kw):
        """Change the global speed limits."""
        # BUG FIX: the old test `if 'max_ul_rate' or 'max_dl_rate' in kw` was
        # always true; check each key's presence explicitly.
        if 'max_ul_rate' in kw or 'max_dl_rate' in kw:
            # BUG FIX: the old call passed the kw dict as the positional
            # `hash` argument, producing a bogus URL; forward as parameters.
            # NOTE(review): the webui setsetting API uses s=<name>&v=<value>
            # (see set_upspeed/set_downspeed) — confirm callers' kw shape.
            self.do_action('setsetting', **kw)
        else:
            logger.error('Wrong parameters given')

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def set_upspeed(self, speed, *arg, **kw):
        """Set the global max upload rate."""
        return self.fetch('&action=setsetting&s=max_ul_rate&v=' + speed)

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def set_downspeed(self, speed, *arg, **kw):
        """Set the global max download rate."""
        return self.fetch('&action=setsetting&s=max_dl_rate&v=' + speed)

    def _get_url(self, host=None, port=None):
        """Build the webui base URL from explicit or configured host/port."""
        u_host = host or htpc.settings.get('utorrent_host')
        u_port = port or htpc.settings.get('utorrent_port')
        return 'http://{}:{}/gui/'.format(striphttp(u_host), u_port)

    def auth(self, host, port, username, pwd):
        """Fetch a fresh auth token (and session cookies) from token.html."""
        logger.debug('Fetching auth token')
        token_page = self.sess.get(self._get_url(host, port) + 'token.html', auth=(username, pwd))
        self._token = AuthTokenParser().token(token_page.content)
        self._cookies = token_page.cookies
        logger.debug('Auth token is %s' % self._token)
        return self._token

    def fetch(self, args, username='', password='', host='', port=''):
        """
        Perform an authenticated GET against the webui.

        :param args: query-string fragment, e.g. '&list=1'
        :rtype: requests.Response
        :return: the response on success, None on any failure
        """
        password = password or htpc.settings.get('utorrent_password', '')
        username = username or htpc.settings.get('utorrent_username', '')
        host = host or htpc.settings.get('utorrent_host')
        port = port or htpc.settings.get('utorrent_port')
        token_str = '?token=%s' % self._token
        url = self._get_url(host, port) + token_str + args
        logger.debug('Fetching %s' % url)
        try:
            r = self.sess.get(url, timeout=5, auth=(username, password))
            # Api returns 300 if invalid token according to the docs but it really returns 400
            # ut 3.4.5 returns 401 when you try to get the token
            if r.status_code in [401, 400, 300]:
                token = self.auth(host, port, username, password)
                if token:
                    # BUG FIX: retry with the same connection parameters (the
                    # old code dropped explicit host/port/credential overrides).
                    return self.fetch(args, username=username, password=password, host=host, port=port)
            elif r.status_code == 404:
                logger.error('Check your settings, invalid username or password')
            elif r.status_code == 200:
                if r:
                    return r
        except Exception as e:
            logger.error('Failed to fetch %s with args %s %s' % (url, args, e), exc_info=True)
| |
#!/usr/bin/env python
# encoding: utf-8
import os
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
def main():
    """Initialize the OSF app (DB backends only, no routes) and create or
    update the Conference records defined in MEETING_DATA."""
    init_app(set_backends=True, routes=False)
    populate_conferences()
# Conference registration data keyed by OSF meeting endpoint.  Each entry
# configures the Conference record created/updated by populate_conferences():
# display name, optional info/logo URLs, whether the meeting is active, the
# admin account emails (which must already be registered users), and which
# submission types (poster/talk) it accepts.
MEETING_DATA = {
    'spsp2014': {
        'name': 'Society for Personality and Social Psychology 2014',
        'info_url': None,
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'asb2014': {
        'name': 'Association of Southeastern Biologists 2014',
        'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'aps2014': {
        'name': 'Association for Psychological Science 2014',
        'info_url': 'http://centerforopenscience.org/aps/',
        'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'annopeer2014': {
        'name': '#annopeer',
        'info_url': None,
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'cpa2014': {
        'name': 'Canadian Psychological Association 2014',
        'info_url': None,
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'filaments2014': {
        'name': 'National Radio Astronomy Observatory Filaments 2014',
        'info_url': None,
        'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
                    'filamentary-structure/images/filaments2014_660x178.png',
        'active': False,
        'admins': [
            'lvonschi@nrao.edu',
            # 'Dkim@nrao.edu',
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'bitss2014': {
        'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
        'info_url': None,
        'logo_url': os.path.join(
            settings.STATIC_URL_PATH,
            'img',
            'conferences',
            'bitss.jpg',
        ),
        'active': False,
        'admins': [
            'gkroll@berkeley.edu',
            'awais@berkeley.edu',
        ],
        'public_projects': True,
        'poster': False,
        'talk': True,
    },
    'spsp2015': {
        'name': 'Society for Personality and Social Psychology 2015',
        'info_url': None,
        'logo_url': None,
        'active': False,
        # NOTE(review): no 'public_projects' key here, unlike every other
        # entry — confirm this is intentional.
        'admins': [
            'meetings@spsp.org',
        ],
        'poster': True,
        'talk': True,
    },
    'aps2015': {
        'name': 'Association for Psychological Science 2015',
        'info_url': None,
        'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
        'active': True,
        'admins': [
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'icps2015': {
        'name': 'International Convention of Psychological Science 2015',
        'info_url': None,
        'logo_url': 'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg',
        'active': False,
        'admins': [
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'mpa2015': {
        'name': 'Midwestern Psychological Association 2015',
        'info_url': None,
        'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg',
        'active': True,
        'admins': [
            'mpa@kent.edu',
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'NCCC2015': {
        'name': 'North Carolina Cognition Conference 2015',
        'info_url': None,
        'logo_url': None,
        'active': False,
        'admins': [
            'aoverman@elon.edu',
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'VPRSF2015': {
        'name': 'Virginia Piedmont Regional Science Fair 2015',
        'info_url': None,
        'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png',
        'active': False,
        'admins': [
            'director@vprsf.org',
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'APRS2015': {
        'name': 'UVA Annual Postdoctoral Research Symposium 2015',
        'info_url': None,
        'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg',
        'active': False,
        'admins': [
            'mhurst@virginia.edu',
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'ASB2015': {
        'name': 'Association of Southeastern Biologists 2015',
        'info_url': None,
        'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png',
        'active': False,
        'admins': [
            'amorris.mtsu@gmail.com',
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'TeaP2015': {
        'name': 'Tagung experimentell arbeitender Psychologen 2015',
        'info_url': None,
        'logo_url': None,
        'active': False,
        'admins': [
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'VSSEF2015': {
        'name': 'Virginia State Science and Engineering Fair 2015',
        'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/',
        'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg',
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'RMPA2015': {
        'name': 'Rocky Mountain Psychological Association 2015',
        'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf',
        'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'ARP2015': {
        'name': 'Association for Research in Personality 2015',
        'info_url': 'http://www.personality-arp.org/conference/',
        'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'SEP2015': {
        'name': 'Society of Experimental Psychologists Meeting 2015',
        'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/',
        'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif',
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'Reid2015': {
        'name': 'L. Starling Reid Undergraduate Psychology Conference 2015',
        'info_url': 'http://avillage.web.virginia.edu/Psych/Conference',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'NEEPS2015': {
        'name': 'Northeastern Evolutionary Psychology Conference 2015',
        'info_url': 'http://neeps2015.weebly.com/',
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'VaACS2015': {
        'name': 'Virginia Section American Chemical Society Student Poster Session 2015',
        'info_url': 'http://virginia.sites.acs.org/',
        'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg',
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'MADSSCi2015': {
        'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015',
        'info_url': 'http://madssci.abrf.org',
        'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'NRAO2015': {
        'name': 'National Radio Astronomy Observatory Accretion 2015',
        'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015/posters',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'ARCS2015': {
        'name': 'Advancing Research Communication and Scholarship 2015',
        'info_url': 'http://commons.pacificu.edu/arcs/',
        'logo_url': 'http://commons.pacificu.edu/assets/md5images/4dfd167454e9f4745360a9550e189323.png',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'singlecasedesigns2015': {
        'name': 'Single Case Designs in Clinical Psychology: Uniting Research and Practice',
        'info_url': 'https://www.royalholloway.ac.uk/psychology/events/eventsarticles/singlecasedesignsinclinicalpsychologyunitingresearchandpractice.aspx',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'OSFM2015': {
        'name': 'OSF for Meetings 2015',
        'info_url': None,
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'JSSP2015': {
        'name': 'Japanese Society of Social Psychology 2015',
        'info_url': 'http://www.socialpsychology.jp/conf2015/index.html',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    '4S2015': {
        'name': 'Society for Social Studies of Science 2015',
        'info_url': 'http://www.4sonline.org/meeting',
        'logo_url': 'http://www.4sonline.org/ee/denver-skyline.jpg',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'IARR2016': {
        'name': 'International Association for Relationship Research 2016',
        'info_url': 'http://iarr.psych.utoronto.ca/',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'IA2015': {
        'name': 'Inclusive Astronomy 2015',
        'info_url': 'https://vanderbilt.irisregistration.com/Home/Site?code=InclusiveAstronomy2015',
        'logo_url': 'https://vanderbilt.blob.core.windows.net/images/Inclusive%20Astronomy.jpg',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'PsiChiRepository': {
        'name': 'Psi Chi Repository',
        'info_url': None,
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'R2RC': {
        'name': 'Right to Research Coalition',
        'info_url': None,
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'OpenCon2015': {
        'name': 'OpenCon2015',
        'info_url': 'http://opencon2015.org/',
        'logo_url': 'http://s8.postimg.org/w9b30pxyd/Open_Con2015_new_logo.png',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'ESIP2015': {
        'name': 'Earth Science Information Partners 2015',
        'info_url': 'http://esipfed.org/',
        'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'SPSP2016': {
        'name': 'Society for Personality and Social Psychology 2016 ',
        'info_url': 'http://meeting.spsp.org',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'NACIII': {
        'name': '2015 National Astronomy Consortium (NAC) III Workshop',
        'info_url': 'https://info.nrao.edu/do/odi/meetings/2015/nac111/',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'CDS2015': {
        'name': 'Cognitive Development Society 2015',
        'info_url': 'http://meetings.cogdevsoc.org/',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'SEASR2016': {
        'name': 'Southeastern Association of Shared Resources 2016',
        'info_url': 'http://seasr.abrf.org',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
}
def populate_conferences():
    """Create a Conference record per MEETING_DATA entry, resolving admin
    emails to User objects; update the record in place if it already exists."""
    for endpoint, attrs in MEETING_DATA.iteritems():
        endpoint = endpoint.strip()
        # Resolve every admin email to a registered user up front.
        admin_objs = []
        for email in attrs.pop('admins'):
            try:
                admin_objs.append(User.find_one(Q('username', 'iexact', email)))
            except ModularOdmException:
                raise RuntimeError('Username {0!r} is not registered.'.format(email))
        conf = Conference(
            endpoint=endpoint, admins=admin_objs, **attrs
        )
        try:
            conf.save()
        except ModularOdmException:
            # Endpoint already taken: refresh the existing record instead.
            print('{0} Conference already exists. Updating existing record...'.format(endpoint))
            conf = Conference.find_one(Q('endpoint', 'eq', endpoint))
            for key, value in attrs.items():
                setattr(conf, key, value)
            conf.admins = admin_objs
            changed_fields = conf.save()
            if changed_fields:
                print('Changed: {}'.format(changed_fields))
        else:
            print('Added new Conference: {}'.format(endpoint))
# Script entry point: populate/update all conferences.
if __name__ == '__main__':
    main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from txamqp.client import Closed
from txamqp.queue import Empty
from txamqp.content import Content
from txamqp.testlib import TestBase, supportedBrokers, QPID, OPENAMQ
from twisted.internet.defer import inlineCallbacks
class BasicTests(TestBase):
    """Tests for 'methods' on the amqp basic 'class'.

    Fixes applied in review:
    * Python-2-only ``except E, e`` replaced by ``except E as e`` (valid on
      Python 2.6+ and required on Python 3).
    * ``except Empty: None`` (a no-op expression) replaced by ``pass``.
    * test_cancel: failure message concatenated a Message object to a str,
      which raised TypeError instead of failing cleanly.
    * test_qos_prefetch_size: ``basic_qos`` was fired without ``yield``, so
      the publishes could race the qos-ok.
    """

    @supportedBrokers(QPID, OPENAMQ)
    @inlineCallbacks
    def test_consume_no_local(self):
        """
        Test that the no_local flag is honoured in the consume method
        """
        channel = self.channel
        #setup, declare two queues:
        yield channel.queue_declare(queue="test-queue-1a", exclusive=True)
        yield channel.queue_declare(queue="test-queue-1b", exclusive=True)
        #establish two consumers one of which excludes delivery of locally sent messages
        yield channel.basic_consume(consumer_tag="local_included", queue="test-queue-1a")
        yield channel.basic_consume(consumer_tag="local_excluded", queue="test-queue-1b", no_local=True)
        #send a message
        channel.basic_publish(routing_key="test-queue-1a", content=Content("consume_no_local"))
        channel.basic_publish(routing_key="test-queue-1b", content=Content("consume_no_local"))
        #check the queues of the two consumers
        excluded = yield self.client.queue("local_excluded")
        included = yield self.client.queue("local_included")
        msg = yield included.get()
        self.assertEqual("consume_no_local", msg.content.body)
        try:
            yield excluded.get(timeout=1)
            self.fail("Received locally published message though no_local=true")
        except Empty:
            pass

    @inlineCallbacks
    def test_consume_exclusive(self):
        """
        Test that the exclusive flag is honoured in the consume method
        """
        channel = self.channel
        #setup, declare a queue:
        yield channel.queue_declare(queue="test-queue-2", exclusive=True)
        #check that an exclusive consumer prevents other consumer being created:
        yield channel.basic_consume(consumer_tag="first", queue="test-queue-2", exclusive=True)
        try:
            yield channel.basic_consume(consumer_tag="second", queue="test-queue-2")
            self.fail("Expected consume request to fail due to previous exclusive consumer")
        except Closed as e:
            self.assertChannelException(403, e.args[0])
        #open new channel and cleanup last consumer:
        channel = yield self.client.channel(2)
        yield channel.channel_open()
        #check that an exclusive consumer cannot be created if a consumer already exists:
        yield channel.basic_consume(consumer_tag="first", queue="test-queue-2")
        try:
            yield channel.basic_consume(consumer_tag="second", queue="test-queue-2", exclusive=True)
            self.fail("Expected exclusive consume request to fail due to previous consumer")
        except Closed as e:
            self.assertChannelException(403, e.args[0])

    @inlineCallbacks
    def test_consume_queue_not_found(self):
        """
        C{basic_consume} fails with a channel exception with a C{404} code when
        the specified queue doesn't exist.
        """
        channel = self.channel
        try:
            #queue specified but doesn't exist:
            yield channel.basic_consume(queue="invalid-queue")
            self.fail("Expected failure when consuming from non-existent queue")
        except Closed as e:
            self.assertChannelException(404, e.args[0])

    @supportedBrokers(QPID, OPENAMQ)
    @inlineCallbacks
    def test_consume_queue_unspecified(self):
        """
        C{basic_consume} fails with a connection exception with a C{503} code
        when no queue is specified.
        """
        channel = self.channel
        try:
            #queue not specified and none previously declared for channel:
            yield channel.basic_consume(queue="")
            self.fail("Expected failure when consuming from unspecified queue")
        except Closed as e:
            self.assertConnectionException(530, e.args[0])

    @inlineCallbacks
    def test_consume_unique_consumers(self):
        """
        Ensure unique consumer tags are enforced
        """
        channel = self.channel
        #setup, declare a queue:
        yield channel.queue_declare(queue="test-queue-3", exclusive=True)
        #check that attempts to use duplicate tags are detected and prevented:
        yield channel.basic_consume(consumer_tag="first", queue="test-queue-3")
        try:
            yield channel.basic_consume(consumer_tag="first", queue="test-queue-3")
            self.fail("Expected consume request to fail due to non-unique tag")
        except Closed as e:
            self.assertConnectionException(530, e.args[0])

    @inlineCallbacks
    def test_cancel(self):
        """
        Test compliance of the basic.cancel method
        """
        channel = self.channel
        #setup, declare a queue:
        yield channel.queue_declare(queue="test-queue-4", exclusive=True)
        yield channel.basic_consume(consumer_tag="my-consumer", queue="test-queue-4")
        channel.basic_publish(routing_key="test-queue-4", content=Content("One"))
        #cancel should stop messages being delivered
        yield channel.basic_cancel(consumer_tag="my-consumer")
        channel.basic_publish(routing_key="test-queue-4", content=Content("Two"))
        myqueue = yield self.client.queue("my-consumer")
        msg = yield myqueue.get(timeout=1)
        self.assertEqual("One", msg.content.body)
        try:
            msg = yield myqueue.get(timeout=1)
            # BUG FIX: msg is a Message object; concatenating it to a str
            # raised TypeError instead of reporting the failure.
            self.fail("Got message after cancellation: " + msg.content.body)
        except Empty:
            pass
        #cancellation of non-existant consumers should be handled without error
        yield channel.basic_cancel(consumer_tag="my-consumer")
        yield channel.basic_cancel(consumer_tag="this-never-existed")

    @supportedBrokers(QPID, OPENAMQ)
    @inlineCallbacks
    def test_ack(self):
        """
        Test basic ack/recover behaviour
        """
        channel = self.channel
        yield channel.queue_declare(queue="test-ack-queue", exclusive=True)
        reply = yield channel.basic_consume(queue="test-ack-queue", no_ack=False)
        queue = yield self.client.queue(reply.consumer_tag)
        channel.basic_publish(routing_key="test-ack-queue", content=Content("One"))
        channel.basic_publish(routing_key="test-ack-queue", content=Content("Two"))
        channel.basic_publish(routing_key="test-ack-queue", content=Content("Three"))
        channel.basic_publish(routing_key="test-ack-queue", content=Content("Four"))
        channel.basic_publish(routing_key="test-ack-queue", content=Content("Five"))
        msg1 = yield queue.get(timeout=1)
        msg2 = yield queue.get(timeout=1)
        msg3 = yield queue.get(timeout=1)
        msg4 = yield queue.get(timeout=1)
        msg5 = yield queue.get(timeout=1)
        self.assertEqual("One", msg1.content.body)
        self.assertEqual("Two", msg2.content.body)
        self.assertEqual("Three", msg3.content.body)
        self.assertEqual("Four", msg4.content.body)
        self.assertEqual("Five", msg5.content.body)
        channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True)  #One & Two
        channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False)  #Four
        yield channel.basic_recover(requeue=False)
        msg3b = yield queue.get(timeout=1)
        msg5b = yield queue.get(timeout=1)
        self.assertEqual("Three", msg3b.content.body)
        self.assertEqual("Five", msg5b.content.body)
        try:
            extra = yield queue.get(timeout=1)
            self.fail("Got unexpected message: " + extra.content.body)
        except Empty:
            pass

    @inlineCallbacks
    def test_recover_requeue(self):
        """
        Test requeing on recovery
        """
        channel = self.channel
        yield channel.queue_declare(queue="test-requeue", exclusive=True)
        subscription = yield channel.basic_consume(queue="test-requeue", no_ack=False)
        queue = yield self.client.queue(subscription.consumer_tag)
        channel.basic_publish(routing_key="test-requeue", content=Content("One"))
        channel.basic_publish(routing_key="test-requeue", content=Content("Two"))
        channel.basic_publish(routing_key="test-requeue", content=Content("Three"))
        channel.basic_publish(routing_key="test-requeue", content=Content("Four"))
        channel.basic_publish(routing_key="test-requeue", content=Content("Five"))
        msg1 = yield queue.get(timeout=1)
        msg2 = yield queue.get(timeout=1)
        msg3 = yield queue.get(timeout=1)
        msg4 = yield queue.get(timeout=1)
        msg5 = yield queue.get(timeout=1)
        self.assertEqual("One", msg1.content.body)
        self.assertEqual("Two", msg2.content.body)
        self.assertEqual("Three", msg3.content.body)
        self.assertEqual("Four", msg4.content.body)
        self.assertEqual("Five", msg5.content.body)
        channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True)  #One & Two
        channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False)  #Four
        yield channel.basic_cancel(consumer_tag=subscription.consumer_tag)
        subscription2 = yield channel.basic_consume(queue="test-requeue")
        queue2 = yield self.client.queue(subscription2.consumer_tag)
        yield channel.basic_recover(requeue=True)
        msg3b = yield queue2.get()
        msg5b = yield queue2.get()
        self.assertEqual("Three", msg3b.content.body)
        self.assertEqual("Five", msg5b.content.body)
        self.assertEqual(True, msg3b.redelivered)
        self.assertEqual(True, msg5b.redelivered)
        try:
            extra = yield queue2.get(timeout=1)
            self.fail("Got unexpected message in second queue: " + extra.content.body)
        except Empty:
            pass
        try:
            extra = yield queue.get(timeout=1)
            self.fail("Got unexpected message in original queue: " + extra.content.body)
        except Empty:
            pass

    @inlineCallbacks
    def test_qos_prefetch_count(self):
        """
        Test that the prefetch count specified is honoured
        """
        #setup: declare queue and subscribe
        channel = self.channel
        yield channel.queue_declare(queue="test-prefetch-count", exclusive=True)
        subscription = yield channel.basic_consume(queue="test-prefetch-count", no_ack=False)
        queue = yield self.client.queue(subscription.consumer_tag)
        #set prefetch to 5:
        yield channel.basic_qos(prefetch_count=5)
        #publish 10 messages:
        for i in range(1, 11):
            channel.basic_publish(routing_key="test-prefetch-count", content=Content("Message %d" % i))
        #only 5 messages should have been delivered:
        for i in range(1, 6):
            msg = yield queue.get(timeout=1)
            self.assertEqual("Message %d" % i, msg.content.body)
        try:
            extra = yield queue.get(timeout=1)
            self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
        except Empty:
            pass
        #ack messages and check that the next set arrive ok:
        channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
        for i in range(6, 11):
            msg = yield queue.get(timeout=1)
            self.assertEqual("Message %d" % i, msg.content.body)
        channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
        try:
            extra = yield queue.get(timeout=1)
            self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
        except Empty:
            pass

    @supportedBrokers(QPID, OPENAMQ)
    @inlineCallbacks
    def test_qos_prefetch_size(self):
        """
        Test that the prefetch size specified is honoured
        """
        #setup: declare queue and subscribe
        channel = self.channel
        yield channel.queue_declare(queue="test-prefetch-size", exclusive=True)
        subscription = yield channel.basic_consume(queue="test-prefetch-size", no_ack=False)
        queue = yield self.client.queue(subscription.consumer_tag)
        #set prefetch to 50 bytes (each message is 9 or 10 bytes):
        # BUG FIX: wait for the qos-ok before publishing (was fired un-yielded,
        # racing the publishes below); matches test_qos_prefetch_count.
        yield channel.basic_qos(prefetch_size=50)
        #publish 10 messages:
        for i in range(1, 11):
            channel.basic_publish(routing_key="test-prefetch-size", content=Content("Message %d" % i))
        #only 5 messages should have been delivered (i.e. 45 bytes worth):
        for i in range(1, 6):
            msg = yield queue.get(timeout=1)
            self.assertEqual("Message %d" % i, msg.content.body)
        try:
            extra = yield queue.get(timeout=1)
            self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
        except Empty:
            pass
        #ack messages and check that the next set arrive ok:
        channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
        for i in range(6, 11):
            msg = yield queue.get(timeout=1)
            self.assertEqual("Message %d" % i, msg.content.body)
        channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
        try:
            extra = yield queue.get(timeout=1)
            self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
        except Empty:
            pass
        #make sure that a single oversized message still gets delivered
        large = "abcdefghijklmnopqrstuvwxyz"
        large = large + "-" + large
        channel.basic_publish(routing_key="test-prefetch-size", content=Content(large))
        msg = yield queue.get(timeout=1)
        self.assertEqual(large, msg.content.body)

    @inlineCallbacks
    def test_get(self):
        """
        Test basic_get method
        """
        channel = self.channel
        yield channel.queue_declare(queue="test-get", exclusive=True)
        #publish some messages (no_ack=True) with persistent messaging
        for i in range(1, 11):
            msg = Content("Message %d" % i)
            msg["delivery mode"] = 2
            channel.basic_publish(routing_key="test-get", content=msg)
        #use basic_get to read back the messages, and check that we get an empty at the end
        for i in range(1, 11):
            reply = yield channel.basic_get(no_ack=True)
            self.assertEqual(reply.method.klass.name, "basic")
            self.assertEqual(reply.method.name, "get-ok")
            self.assertEqual("Message %d" % i, reply.content.body)
        reply = yield channel.basic_get(no_ack=True)
        self.assertEqual(reply.method.klass.name, "basic")
        self.assertEqual(reply.method.name, "get-empty")
        #publish some messages (no_ack=True) transient messaging
        for i in range(11, 21):
            channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
        #use basic_get to read back the messages, and check that we get an empty at the end
        for i in range(11, 21):
            reply = yield channel.basic_get(no_ack=True)
            self.assertEqual(reply.method.klass.name, "basic")
            self.assertEqual(reply.method.name, "get-ok")
            self.assertEqual("Message %d" % i, reply.content.body)
        reply = yield channel.basic_get(no_ack=True)
        self.assertEqual(reply.method.klass.name, "basic")
        self.assertEqual(reply.method.name, "get-empty")
        #repeat for no_ack=False
        #publish some messages (no_ack=False) with persistent messaging
        for i in range(21, 31):
            msg = Content("Message %d" % i)
            msg["delivery mode"] = 2
            channel.basic_publish(routing_key="test-get", content=msg)
        #use basic_get to read back the messages, and check that we get an empty at the end
        for i in range(21, 31):
            reply = yield channel.basic_get(no_ack=False)
            self.assertEqual(reply.method.klass.name, "basic")
            self.assertEqual(reply.method.name, "get-ok")
            self.assertEqual("Message %d" % i, reply.content.body)
        reply = yield channel.basic_get(no_ack=True)
        self.assertEqual(reply.method.klass.name, "basic")
        self.assertEqual(reply.method.name, "get-empty")
        #public some messages (no_ack=False) with transient messaging
        for i in range(31, 41):
            channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
        for i in range(31, 41):
            reply = yield channel.basic_get(no_ack=False)
            self.assertEqual(reply.method.klass.name, "basic")
            self.assertEqual(reply.method.name, "get-ok")
            self.assertEqual("Message %d" % i, reply.content.body)
            if i == 33:
                channel.basic_ack(delivery_tag=reply.delivery_tag, multiple=True)
            if i in [35, 37, 39]:
                channel.basic_ack(delivery_tag=reply.delivery_tag)
        reply = yield channel.basic_get(no_ack=True)
        self.assertEqual(reply.method.klass.name, "basic")
        self.assertEqual(reply.method.name, "get-empty")
        #recover(requeue=True)
        yield channel.basic_recover(requeue=True)
        #get the unacked messages again (34, 36, 38, 40)
        for i in [34, 36, 38, 40]:
            reply = yield channel.basic_get(no_ack=False)
            self.assertEqual(reply.method.klass.name, "basic")
            self.assertEqual(reply.method.name, "get-ok")
            self.assertEqual("Message %d" % i, reply.content.body)
            channel.basic_ack(delivery_tag=reply.delivery_tag)
        reply = yield channel.basic_get(no_ack=True)
        self.assertEqual(reply.method.klass.name, "basic")
        self.assertEqual(reply.method.name, "get-empty")
        yield channel.basic_recover(requeue=True)
        reply = yield channel.basic_get(no_ack=True)
        self.assertEqual(reply.method.klass.name, "basic")
        self.assertEqual(reply.method.name, "get-empty")
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import numpy as np
import re
import pickle
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.python.keras.preprocessing import text
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import Embedding
from tensorflow.python.keras.layers import Conv1D
from tensorflow.python.keras.layers import MaxPooling1D
from tensorflow.python.keras.layers import GlobalAveragePooling1D
from google.cloud import storage
# Show INFO-level TensorFlow log messages (via the TF1-compat logging shim).
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# Dataset-wide constants shared by the input, model and embedding functions.
CLASSES = {'github': 0, 'nytimes': 1, 'techcrunch': 2}  # label-to-int mapping
TOP_K = 20000  # Limit on the number vocabulary size used for tokenization
MAX_SEQUENCE_LENGTH = 50  # Sentences will be truncated/padded to this length
def parse_gcs_url(url):
    """Split a GCS URL into its (bucket_name, blob_name) components.

    # Arguments:
        url: string of the form 'gs://bucket/path/to/blob'
    # Returns: (bucket_name, blob_name) tuple of strings
    # Raises: ValueError if the URL is not a well-formed gs:// URL.
    """
    match = re.search('gs://(.*?)/(.*)', url)
    if match is None:
        # Previously a malformed URL crashed with AttributeError on None.group.
        raise ValueError('Invalid GCS URL: {!r} (expected gs://bucket/path)'.format(url))
    return match.group(1), match.group(2)


def download_from_gcs(source, destination):
    """Helper function to download data from Google Cloud Storage.

    # Arguments:
        source: string, the GCS URL to download from (e.g. 'gs://bucket/file.csv')
        destination: string, the filename to save as on local disk. MUST be filename
        ONLY, doesn't support folders. (e.g. 'file.csv', NOT 'folder/file.csv')
    # Returns: nothing, downloads file to local disk
    """
    bucket_name, blob_name = parse_gcs_url(source)
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(bucket_name)
    bucket.blob(blob_name).download_to_filename(destination)
def load_hacker_news_data(train_data_path, eval_data_path):
    """Parse raw tsv of hacker news headlines into (sentence, integer label) pairs.

    # Arguments:
        train_data_path: string, path to tsv containing training data.
        can be a local path or a GCS url (gs://...)
        eval_data_path: string, path to tsv containing eval data.
        can be a local path or a GCS url (gs://...)
    # Returns:
        ((train_sentences, train_labels), (test_sentences, test_labels)): sentences
        are lists of strings, labels are numpy integer arrays
    """
    # Fetch local copies of any GCS-hosted inputs first.
    resolved = []
    for remote_path, local_name in ((train_data_path, 'train.csv'),
                                    (eval_data_path, 'eval.csv')):
        if remote_path.startswith('gs://'):
            download_from_gcs(remote_path, destination=local_name)
            remote_path = local_name
        resolved.append(remote_path)
    train_data_path, eval_data_path = resolved
    # Parse the tab-separated files with pandas; columns are (label, text).
    column_names = ('label', 'text')
    train_df = pd.read_csv(train_data_path, names=column_names, sep='\t')
    eval_df = pd.read_csv(eval_data_path, names=column_names, sep='\t')
    train_pair = (list(train_df['text']),
                  np.array(train_df['label'].map(CLASSES)))
    eval_pair = (list(eval_df['text']),
                 np.array(eval_df['label'].map(CLASSES)))
    return (train_pair, eval_pair)
def input_fn(texts, labels, tokenizer, batch_size, mode):
    """Create tf.estimator compatible input function.

    # Arguments:
        texts: [strings], list of sentences
        labels: numpy int vector, integer labels for sentences
        tokenizer: tf.python.keras.preprocessing.text.Tokenizer
          used to convert sentences to integers
        batch_size: int, number of records to use for each train batch
        mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.EVAL
    # Returns:
        tf.estimator.inputs.numpy_input_fn, produces feature and label
          tensors one batch at a time
    """
    # Convert sentences to integer sequences, then pad/truncate every sequence
    # to MAX_SEQUENCE_LENGTH (padding and truncation both at the beginning).
    sequences = tokenizer.texts_to_sequences(texts)
    padded = sequence.pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    # Training runs forever over shuffled data; eval makes a single ordered pass.
    is_eval = mode == tf.estimator.ModeKeys.EVAL
    return tf.compat.v1.estimator.inputs.numpy_input_fn(
        padded,
        y=labels,
        batch_size=batch_size,
        num_epochs=1 if is_eval else None,
        shuffle=not is_eval,
        queue_capacity=50000
    )
def keras_estimator(model_dir,
                    config,
                    learning_rate,
                    filters=64,
                    dropout_rate=0.2,
                    embedding_dim=200,
                    kernel_size=3,
                    pool_size=3,
                    embedding_path=None,
                    word_index=None):
    """Builds a CNN model using keras and converts to tf.estimator.Estimator.

    # Arguments
        model_dir: string, file path where training files will be written
        config: tf.estimator.RunConfig, specifies properties of tf Estimator
        learning_rate: float, learning rate for the Adam optimizer
        filters: int, output dimension of the layers.
        kernel_size: int, length of the convolution window.
        embedding_dim: int, dimension of the embedding vectors.
        dropout_rate: float, percentage of input to drop at Dropout layers.
        pool_size: int, factor by which to downscale input at MaxPooling layer.
        embedding_path: string, file location of pre-trained embedding (if used)
        defaults to None which will cause the model to train embedding from scratch
        word_index: dictionary, mapping of vocabulary to integers. used only if
        pre-trained embedding is provided
    # Returns
        A tf.estimator.Estimator
    """
    # Create model instance.
    model = models.Sequential()
    # Vocabulary is capped at TOP_K entries (+1 for the reserved index 0).
    num_features = min(len(word_index) + 1, TOP_K)

    # Add embedding layer. If pre-trained embedding is used add weights to the
    # embeddings layer and set trainable to the is_embedding_trainable flag.
    if embedding_path is not None:  # idiomatic identity test (was `!= None`)
        embedding_matrix = get_embedding_matrix(word_index, embedding_path, embedding_dim)
        is_embedding_trainable = True  # set to False to freeze embedding weights
        model.add(Embedding(input_dim=num_features,
                            output_dim=embedding_dim,
                            input_length=MAX_SEQUENCE_LENGTH,
                            weights=[embedding_matrix],
                            trainable=is_embedding_trainable))
    else:
        model.add(Embedding(input_dim=num_features,
                            output_dim=embedding_dim,
                            input_length=MAX_SEQUENCE_LENGTH))

    # Two Conv1D blocks with dropout, then global pooling and a softmax head.
    model.add(Dropout(rate=dropout_rate))
    model.add(Conv1D(filters=filters,
                     kernel_size=kernel_size,
                     activation='relu',
                     bias_initializer='random_uniform',
                     padding='same'))
    model.add(MaxPooling1D(pool_size=pool_size))
    model.add(Conv1D(filters=filters * 2,
                     kernel_size=kernel_size,
                     activation='relu',
                     bias_initializer='random_uniform',
                     padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(len(CLASSES), activation='softmax'))

    # Compile model with learning parameters, then wrap as an Estimator.
    optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
    model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc'])
    estimator = tf.keras.estimator.model_to_estimator(keras_model=model, model_dir=model_dir, config=config)
    return estimator
def serving_input_fn():
    """Defines the features to be passed to the model during inference.

    Expects already tokenized and padded representation of sentences.
    # Arguments: none
    # Returns: tf.estimator.export.ServingInputReceiver
    """
    # A single int16 tensor of token ids, one padded sentence per row.
    placeholder = tf.compat.v1.placeholder(tf.int16, [None, MAX_SEQUENCE_LENGTH])
    # The placeholder is passed through unchanged as the model feature.
    return tf.estimator.export.TensorServingInputReceiver(placeholder, placeholder)
def get_embedding_matrix(word_index, embedding_path, embedding_dim):
    """Extract the embeddings matching the current vocabulary from a generic
    pre-trained embedding file.

    The pre-trained embedding file is obtained from https://nlp.stanford.edu/projects/glove/
    # Arguments:
        word_index: dict, {key =word in vocabulary: value= integer mapped to that word}
        embedding_path: string, location of the pre-trained embedding file on disk
        embedding_dim: int, dimension of the embedding space
    # Returns: numpy matrix of shape (vocabulary, embedding_dim) that contains the embedded
        representation of each word in the vocabulary.
    """
    # Download first if the embedding file lives in GCS.
    if embedding_path.startswith('gs://'):
        download_from_gcs(embedding_path, destination='embedding.csv')
        embedding_path = 'embedding.csv'
    # Read the pre-trained file: each line is a word followed by its vector.
    pretrained_vectors = {}
    with open(embedding_path) as f:
        for line in f:
            tokens = line.split()
            pretrained_vectors[tokens[0]] = np.asarray(tokens[1:], dtype='float32')
    # Build the matrix restricted to our own (capped) vocabulary; words absent
    # from the pre-trained file keep an all-zeros row.
    num_words = min(len(word_index) + 1, TOP_K)
    matrix = np.zeros((num_words, embedding_dim))
    for word, index in word_index.items():
        if index >= TOP_K:
            continue
        vector = pretrained_vectors.get(word)
        if vector is not None:
            matrix[index] = vector
    return matrix
"""
Main orchestrator. Responsible for calling all other functions in model.py
# Arguments:
output_dir: string, file path where training files will be written
hparams: dict, command line parameters passed from task.py
# Returns: nothing, kicks off training and evaluation
"""
def train_and_evaluate(output_dir, hparams):
    """Main orchestrator. Responsible for calling all other functions in model.py.

    # Arguments:
        output_dir: string, file path where training files will be written
        hparams: dict, command line parameters passed from task.py
    # Returns: nothing, kicks off training and evaluation
    """
    tf.compat.v1.summary.FileWriterCache.clear()  # ensure filewriter cache is clear for TensorBoard events file

    # Load Data
    ((train_texts, train_labels), (test_texts, test_labels)) = load_hacker_news_data(
        hparams['train_data_path'], hparams['eval_data_path'])

    # Create vocabulary from training corpus.
    tokenizer = text.Tokenizer(num_words=TOP_K)
    tokenizer.fit_on_texts(train_texts)

    # Save token dictionary to use during prediction time.
    # BUG FIX: use a context manager — the previous bare open() leaked the
    # file handle and could leave the pickle unflushed.
    with open('tokenizer.pickled', 'wb') as handle:
        pickle.dump(tokenizer, handle)

    # Create estimator
    run_config = tf.estimator.RunConfig(save_checkpoints_steps=500)
    estimator = keras_estimator(
        model_dir=output_dir,
        config=run_config,
        learning_rate=hparams['learning_rate'],
        embedding_path=hparams['embedding_path'],
        word_index=tokenizer.word_index
    )

    # Create TrainSpec.  True division yields a float; max_steps must be an
    # integer step count, so truncate explicitly.
    train_steps = int(hparams['num_epochs'] * len(train_texts) / hparams['batch_size'])
    train_spec = tf.estimator.TrainSpec(
        input_fn=input_fn(
            train_texts,
            train_labels,
            tokenizer,
            hparams['batch_size'],
            mode=tf.estimator.ModeKeys.TRAIN),
        max_steps=train_steps
    )

    # Create EvalSpec; the exporter saves a servable SavedModel per evaluation.
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=input_fn(
            test_texts,
            test_labels,
            tokenizer,
            hparams['batch_size'],
            mode=tf.estimator.ModeKeys.EVAL),
        steps=None,
        exporters=exporter,
        start_delay_secs=10,
        throttle_secs=10
    )

    # Start training
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
| |
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
IntEnum constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Integer constants:
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io, selectors
from enum import IntEnum, IntFlag
# errno may be unavailable on some minimal platforms; degrade gracefully.
try:
    import errno
except ImportError:
    errno = None
# Fall back to the conventional numeric values (Linux: 9/11) when the errno
# module lacks the symbolic name.
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["fromfd", "getfqdn", "create_connection", "create_server",
"has_dualstack_ipv6", "AddressFamily", "SocketKind"]
__all__.extend(os._get_exports_list(_socket))
# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
# nicer string representations.
# Note that _socket only knows about the integer values. The public interface
# in this module understands the enums and translates them back from integers
# where needed (e.g. .family property of a socket object).
# Collect the AF_* integer constants exported by _socket into an IntEnum.
IntEnum._convert_(
    'AddressFamily',
    __name__,
    lambda C: C.isupper() and C.startswith('AF_'))

# Likewise for the SOCK_* socket-type constants.
IntEnum._convert_(
    'SocketKind',
    __name__,
    lambda C: C.isupper() and C.startswith('SOCK_'))

# MSG_* and AI_* are bitmask-style flags, so they become IntFlags.
IntFlag._convert_(
    'MsgFlag',
    __name__,
    lambda C: C.isupper() and C.startswith('MSG_'))

IntFlag._convert_(
    'AddressInfo',
    __name__,
    lambda C: C.isupper() and C.startswith('AI_'))

# Loopback addresses used by helpers in this module.
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
def _intenum_converter(value, enum_klass):
    """Convert a numeric family value to an IntEnum member.

    If it's not a known member, return the numeric value itself.
    """
    try:
        converted = enum_klass(value)
    except ValueError:
        # Unknown constant: hand back the raw integer unchanged.
        converted = value
    return converted
# Keep a reference to the raw C-level socket type before the name `socket`
# is shadowed by the Python subclass defined later in this module.
_realsocket = socket
# WSA error codes
# WSA error codes: human-readable messages for Winsock error numbers, only
# defined (and exported) on Windows.  Built as a single dict literal instead
# of ~95 individual item assignments — clearer and a single allocation at
# import time.
if sys.platform.lower().startswith("win"):
    errorTab = {
        6: "Specified event object handle is invalid.",
        8: "Insufficient memory available.",
        87: "One or more parameters are invalid.",
        995: "Overlapped operation aborted.",
        996: "Overlapped I/O event object not in signaled state.",
        997: "Overlapped operation will complete later.",
        10004: "The operation was interrupted.",
        10009: "A bad file handle was passed.",
        10013: "Permission denied.",
        10014: "A fault occurred on the network??",  # WSAEFAULT
        10022: "An invalid operation was attempted.",
        10024: "Too many open files.",
        10035: "The socket operation would block",
        10036: "A blocking operation is already in progress.",
        10037: "Operation already in progress.",
        10038: "Socket operation on nonsocket.",
        10039: "Destination address required.",
        10040: "Message too long.",
        10041: "Protocol wrong type for socket.",
        10042: "Bad protocol option.",
        10043: "Protocol not supported.",
        10044: "Socket type not supported.",
        10045: "Operation not supported.",
        10046: "Protocol family not supported.",
        10047: "Address family not supported by protocol family.",
        10048: "The network address is in use.",
        10049: "Cannot assign requested address.",
        10050: "Network is down.",
        10051: "Network is unreachable.",
        10052: "Network dropped connection on reset.",
        10053: "Software caused connection abort.",
        10054: "The connection has been reset.",
        10055: "No buffer space available.",
        10056: "Socket is already connected.",
        10057: "Socket is not connected.",
        10058: "The network has been shut down.",
        10059: "Too many references.",
        10060: "The operation timed out.",
        10061: "Connection refused.",
        10062: "Cannot translate name.",
        10063: "The name is too long.",
        10064: "The host is down.",
        10065: "The host is unreachable.",
        10066: "Directory not empty.",
        10067: "Too many processes.",
        10068: "User quota exceeded.",
        10069: "Disk quota exceeded.",
        10070: "Stale file handle reference.",
        10071: "Item is remote.",
        10091: "Network subsystem is unavailable.",
        10092: "Winsock.dll version out of range.",
        10093: "Successful WSAStartup not yet performed.",
        10101: "Graceful shutdown in progress.",
        10102: "No more results from WSALookupServiceNext.",
        10103: "Call has been canceled.",
        10104: "Procedure call table is invalid.",
        10105: "Service provider is invalid.",
        10106: "Service provider failed to initialize.",
        10107: "System call failure.",
        10108: "Service not found.",
        10109: "Class type not found.",
        10110: "No more results from WSALookupServiceNext.",
        10111: "Call was canceled.",
        10112: "Database query was refused.",
        11001: "Host not found.",
        11002: "Nonauthoritative host not found.",
        11003: "This is a nonrecoverable error.",
        11004: "Valid name, no data record requested type.",
        11005: "QoS receivers.",
        11006: "QoS senders.",
        11007: "No QoS senders.",
        11008: "QoS no receivers.",
        11009: "QoS request confirmed.",
        11010: "QoS admission error.",
        11011: "QoS policy failure.",
        11012: "QoS bad style.",
        11013: "QoS bad object.",
        11014: "QoS traffic control error.",
        11015: "QoS generic error.",
        11016: "QoS service type error.",
        11017: "QoS flowspec error.",
        11018: "Invalid QoS provider buffer.",
        11019: "Invalid QoS filter style.",
        11020: "Invalid QoS filter style.",
        11021: "Incorrect QoS filter count.",
        11022: "Invalid QoS object length.",
        11023: "Incorrect QoS flow count.",
        11024: "Unrecognized QoS object.",
        11025: "Invalid QoS policy object.",
        11026: "Invalid QoS flow descriptor.",
        11027: "Invalid QoS provider-specific flowspec.",
        11028: "Invalid QoS provider-specific filterspec.",
        11029: "Invalid QoS shape discard mode object.",
        11030: "Invalid QoS shaping rate object.",
        11031: "Reserved policy QoS element type.",
    }
    __all__.append("errorTab")
class _GiveupOnSendfile(Exception):
    """Internal: raised by _sendfile_use_sendfile() to make sendfile()
    fall back to the plain send()-based implementation."""
    pass
class socket(_socket.socket):
    """A subclass of _socket.socket adding the makefile() method."""
    # __weakref__ allows weak references; _io_refs counts live SocketIO
    # objects handed out by makefile(); _closed records that close() was
    # requested (the fd is only really closed once _io_refs drops to 0).
    __slots__ = ["__weakref__", "_io_refs", "_closed"]
    def __init__(self, family=-1, type=-1, proto=-1, fileno=None):
        # For user code address family and type values are IntEnum members, but
        # for the underlying _socket.socket they're just integers. The
        # constructor of _socket.socket converts the given argument to an
        # integer automatically.
        if fileno is None:
            if family == -1:
                family = AF_INET
            if type == -1:
                type = SOCK_STREAM
            if proto == -1:
                proto = 0
        _socket.socket.__init__(self, family, type, proto, fileno)
        self._io_refs = 0
        self._closed = False
    def __enter__(self):
        # Context-manager support: "with socket(...) as s:" closes on exit.
        return self
    def __exit__(self, *args):
        if not self._closed:
            self.close()
    def __repr__(self):
        """Wrap __repr__() to reveal the real class name and socket
        address(es).
        """
        closed = getattr(self, '_closed', False)
        s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
            % (self.__class__.__module__,
               self.__class__.__qualname__,
               " [closed]" if closed else "",
               self.fileno(),
               self.family,
               self.type,
               self.proto)
        if not closed:
            # Local/peer addresses are best-effort: either call can fail on
            # an unbound/unconnected socket, in which case they are omitted.
            try:
                laddr = self.getsockname()
                if laddr:
                    s += ", laddr=%s" % str(laddr)
            except error:
                pass
            try:
                raddr = self.getpeername()
                if raddr:
                    s += ", raddr=%s" % str(raddr)
            except error:
                pass
        s += '>'
        return s
    def __getstate__(self):
        # Sockets wrap an OS-level fd and cannot be meaningfully pickled.
        raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")
    def dup(self):
        """dup() -> socket object
        Duplicate the socket. Return a new socket object connected to the same
        system resource. The new socket is non-inheritable.
        """
        fd = dup(self.fileno())
        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
        sock.settimeout(self.gettimeout())
        return sock
    def accept(self):
        """accept() -> (socket object, address info)
        Wait for an incoming connection. Return a new socket
        representing the connection, and the address of the client.
        For IP sockets, the address info is a pair (hostaddr, port).
        """
        fd, addr = self._accept()
        sock = socket(self.family, self.type, self.proto, fileno=fd)
        # Issue #7995: if no default timeout is set and the listening
        # socket had a (non-zero) timeout, force the new socket in blocking
        # mode to override platform-specific socket flags inheritance.
        if getdefaulttimeout() is None and self.gettimeout():
            sock.setblocking(True)
        return sock, addr
    def makefile(self, mode="r", buffering=None, *,
                 encoding=None, errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket
        The arguments are as for io.open() after the filename, except the only
        supported mode values are 'r' (default), 'w' and 'b'.
        """
        # XXX refactor to share code?
        if not set(mode) <= {"r", "w", "b"}:
            raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        # Keep the socket alive while the returned stream exists; see
        # _decref_socketios()/close() for the matching decrement.
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text
    if hasattr(os, 'sendfile'):
        def _sendfile_use_sendfile(self, file, offset=0, count=None):
            # Fast path: zero-copy transfer via os.sendfile(); raises
            # _GiveupOnSendfile whenever the plain send() fallback must
            # be used instead (non-regular file, unsupported, ...).
            self._check_sendfile_params(file, offset, count)
            sockno = self.fileno()
            try:
                fileno = file.fileno()
            except (AttributeError, io.UnsupportedOperation) as err:
                raise _GiveupOnSendfile(err)  # not a regular file
            try:
                fsize = os.fstat(fileno).st_size
            except OSError as err:
                raise _GiveupOnSendfile(err)  # not a regular file
            if not fsize:
                return 0  # empty file
            # Truncate to 1GiB to avoid OverflowError, see bpo-38319.
            blocksize = min(count or fsize, 2 ** 30)
            timeout = self.gettimeout()
            if timeout == 0:
                raise ValueError("non-blocking sockets are not supported")
            # poll/select have the advantage of not requiring any
            # extra file descriptor, contrarily to epoll/kqueue
            # (also, they require a single syscall).
            if hasattr(selectors, 'PollSelector'):
                selector = selectors.PollSelector()
            else:
                selector = selectors.SelectSelector()
            selector.register(sockno, selectors.EVENT_WRITE)
            total_sent = 0
            # localize variable access to minimize overhead
            selector_select = selector.select
            os_sendfile = os.sendfile
            try:
                while True:
                    if timeout and not selector_select(timeout):
                        raise _socket.timeout('timed out')
                    if count:
                        blocksize = count - total_sent
                        if blocksize <= 0:
                            break
                    try:
                        sent = os_sendfile(sockno, fileno, offset, blocksize)
                    except BlockingIOError:
                        if not timeout:
                            # Block until the socket is ready to send some
                            # data; avoids hogging CPU resources.
                            selector_select()
                        continue
                    except OSError as err:
                        if total_sent == 0:
                            # We can get here for different reasons, the main
                            # one being 'file' is not a regular mmap(2)-like
                            # file, in which case we'll fall back on using
                            # plain send().
                            raise _GiveupOnSendfile(err)
                        raise err from None
                    else:
                        if sent == 0:
                            break  # EOF
                        offset += sent
                        total_sent += sent
                return total_sent
            finally:
                # Keep file.tell() consistent with what was actually sent,
                # even when an error interrupted the transfer.
                if total_sent > 0 and hasattr(file, 'seek'):
                    file.seek(offset)
    else:
        def _sendfile_use_sendfile(self, file, offset=0, count=None):
            raise _GiveupOnSendfile(
                "os.sendfile() not available on this platform")
    def _sendfile_use_send(self, file, offset=0, count=None):
        # Portable fallback: read the file in chunks and push each chunk
        # through send(), retrying partial sends until the chunk is gone.
        self._check_sendfile_params(file, offset, count)
        if self.gettimeout() == 0:
            raise ValueError("non-blocking sockets are not supported")
        if offset:
            file.seek(offset)
        blocksize = min(count, 8192) if count else 8192
        total_sent = 0
        # localize variable access to minimize overhead
        file_read = file.read
        sock_send = self.send
        try:
            while True:
                if count:
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                data = memoryview(file_read(blocksize))
                if not data:
                    break  # EOF
                while True:
                    try:
                        sent = sock_send(data)
                    except BlockingIOError:
                        continue
                    else:
                        total_sent += sent
                        if sent < len(data):
                            # Partial send: retry with the unsent tail.
                            data = data[sent:]
                        else:
                            break
            return total_sent
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
    def _check_sendfile_params(self, file, offset, count):
        # Shared argument validation for both sendfile implementations.
        if 'b' not in getattr(file, 'mode', 'b'):
            raise ValueError("file should be opened in binary mode")
        if not self.type & SOCK_STREAM:
            raise ValueError("only SOCK_STREAM type sockets are supported")
        if count is not None:
            if not isinstance(count, int):
                raise TypeError(
                    "count must be a positive integer (got {!r})".format(count))
            if count <= 0:
                raise ValueError(
                    "count must be a positive integer (got {!r})".format(count))
    def sendfile(self, file, offset=0, count=None):
        """sendfile(file[, offset[, count]]) -> sent
        Send a file until EOF is reached by using high-performance
        os.sendfile() and return the total number of bytes which
        were sent.
        *file* must be a regular file object opened in binary mode.
        If os.sendfile() is not available (e.g. Windows) or file is
        not a regular file socket.send() will be used instead.
        *offset* tells from where to start reading the file.
        If specified, *count* is the total number of bytes to transmit
        as opposed to sending the file until EOF is reached.
        File position is updated on return or also in case of error in
        which case file.tell() can be used to figure out the number of
        bytes which were sent.
        The socket must be of SOCK_STREAM type.
        Non-blocking sockets are not supported.
        """
        try:
            return self._sendfile_use_sendfile(file, offset, count)
        except _GiveupOnSendfile:
            return self._sendfile_use_send(file, offset, count)
    def _decref_socketios(self):
        # Called by SocketIO.close(); the real close is deferred until all
        # makefile() streams are gone AND close() has been requested.
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self.close()
    def _real_close(self, _ss=_socket.socket):
        # This function should not reference any globals. See issue #808164.
        _ss.close(self)
    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._closed = True
        if self._io_refs <= 0:
            self._real_close()
    def detach(self):
        """detach() -> file descriptor
        Close the socket object without closing the underlying file descriptor.
        The object cannot be used after this call, but the file descriptor
        can be reused for other purposes.  The file descriptor is returned.
        """
        self._closed = True
        return super().detach()
    @property
    def family(self):
        """Read-only access to the address family for this socket.
        """
        return _intenum_converter(super().family, AddressFamily)
    @property
    def type(self):
        """Read-only access to the socket type.
        """
        return _intenum_converter(super().type, SocketKind)
    if os.name == 'nt':
        # Windows sockets are handles, not fds, hence the distinct API.
        def get_inheritable(self):
            return os.get_handle_inheritable(self.fileno())
        def set_inheritable(self, inheritable):
            os.set_handle_inheritable(self.fileno(), inheritable)
    else:
        def get_inheritable(self):
            return os.get_inheritable(self.fileno())
        def set_inheritable(self, inheritable):
            os.set_inheritable(self.fileno(), inheritable)
    get_inheritable.__doc__ = "Get the inheritable flag of the socket"
    set_inheritable.__doc__ = "Set the inheritable flag of the socket"
def fromfd(fd, family, type, proto=0):
    """ fromfd(fd, family, type[, proto]) -> socket object
    Create a socket object from a duplicate of the given file
    descriptor.  The remaining arguments are the same as for socket().
    """
    # Duplicate first so the caller keeps ownership of the original fd.
    return socket(family, type, proto, dup(fd))
if hasattr(_socket, "share"):
    # Windows only: _socket.socket.share() exists there, so expose the
    # matching constructor that rebuilds a socket from its share() bytes.
    def fromshare(info):
        """ fromshare(info) -> socket object
        Create a socket object from the bytes object returned by
        socket.share(pid).
        """
        return socket(0, 0, 0, info)
    __all__.append("fromshare")
if hasattr(_socket, "socketpair"):
    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
        Create a pair of socket objects from the sockets returned by the platform
        socketpair() function.
        The arguments are the same as for socket() except the default family is
        AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
        """
        if family is None:
            try:
                family = AF_UNIX
            except NameError:
                family = AF_INET
        a, b = _socket.socketpair(family, type, proto)
        # Re-wrap the raw _socket.socket objects in this module's socket
        # class; detach() transfers fd ownership to the new wrappers.
        a = socket(family, type, proto, a.detach())
        b = socket(family, type, proto, b.detach())
        return a, b
else:
    # Origin: https://gist.github.com/4325783, by Geert Jansen.  Public domain.
    # Emulation for platforms (e.g. older Windows) without socketpair():
    # connect a client socket to a loopback listener and return both ends.
    def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
        if family == AF_INET:
            host = _LOCALHOST
        elif family == AF_INET6:
            host = _LOCALHOST_V6
        else:
            raise ValueError("Only AF_INET and AF_INET6 socket address families "
                             "are supported")
        if type != SOCK_STREAM:
            raise ValueError("Only SOCK_STREAM socket type is supported")
        if proto != 0:
            raise ValueError("Only protocol zero is supported")
        # We create a connected TCP socket. Note the trick with
        # setblocking(False) that prevents us from having to create a thread.
        lsock = socket(family, type, proto)
        try:
            lsock.bind((host, 0))
            lsock.listen()
            # On IPv6, ignore flow_info and scope_id
            addr, port = lsock.getsockname()[:2]
            csock = socket(family, type, proto)
            try:
                csock.setblocking(False)
                try:
                    csock.connect((addr, port))
                except (BlockingIOError, InterruptedError):
                    pass
                csock.setblocking(True)
                ssock, _ = lsock.accept()
            except:
                csock.close()
                raise
        finally:
            lsock.close()
        return (ssock, csock)
__all__.append("socketpair")
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""
# errno values that mean "would block" on a non-blocking socket; used by
# SocketIO to translate these errors into a None return value.
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
    """Raw I/O implementation for stream sockets.
    This class supports the makefile() method on sockets.  It provides
    the raw I/O interface on top of a socket object.
    """
    # One might wonder why not let FileIO do the job instead.  There are two
    # main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't used read() and
    #   write() on a socket handle)
    # - it wouldn't work with socket timeouts (FileIO would ignore the
    #   timeout and consider the socket non-blocking)
    # XXX More docs
    def __init__(self, sock, mode):
        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
            raise ValueError("invalid mode: %r" % mode)
        io.RawIOBase.__init__(self)
        self._sock = sock
        # Normalize to a binary mode string; a raw stream is always binary.
        if "b" not in mode:
            mode += "b"
        self._mode = mode
        self._reading = "r" in mode
        self._writing = "w" in mode
        self._timeout_occurred = False
    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read.  If the socket is non-blocking and no bytes
        are available, None is returned.
        If *b* is non-empty, a 0 return value indicates that the connection
        was shutdown at the other end.
        """
        self._checkClosed()
        self._checkReadable()
        # Once a timeout has fired, the stream's buffered data may be in an
        # inconsistent state, so further reads are refused.
        if self._timeout_occurred:
            raise OSError("cannot read from timed out object")
        while True:
            try:
                return self._sock.recv_into(b)
            except timeout:
                self._timeout_occurred = True
                raise
            except error as e:
                if e.args[0] in _blocking_errnos:
                    return None
                raise
    def write(self, b):
        """Write the given bytes or bytearray object *b* to the socket
        and return the number of bytes written.  This can be less than
        len(b) if not all data could be written.  If the socket is
        non-blocking and no bytes could be written None is returned.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return self._sock.send(b)
        except error as e:
            # XXX what about EINTR?
            if e.args[0] in _blocking_errnos:
                return None
            raise
    def readable(self):
        """True if the SocketIO is open for reading.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._reading
    def writable(self):
        """True if the SocketIO is open for writing.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._writing
    def seekable(self):
        """True if the SocketIO is open for seeking.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return super().seekable()
    def fileno(self):
        """Return the file descriptor of the underlying socket.
        """
        self._checkClosed()
        return self._sock.fileno()
    @property
    def name(self):
        # File-like "name": the socket fd, or -1 once closed.
        if not self.closed:
            return self.fileno()
        else:
            return -1
    @property
    def mode(self):
        return self._mode
    def close(self):
        """Close the SocketIO object.  This doesn't close the underlying
        socket, except if all references to it have disappeared.
        """
        if self.closed:
            return
        io.RawIOBase.close(self)
        # Drop our reference on the owning socket; it closes itself once
        # its refcount hits zero and close() was requested on it.
        self._sock._decref_socketios()
        self._sock = None
def getfqdn(name=''):
    """Get fully qualified domain name from name.
    An empty argument is interpreted as meaning the local host.
    First the hostname returned by gethostbyaddr() is checked, then
    possibly existing aliases.  In case no FQDN is available, hostname
    from gethostname() is returned.
    """
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, ipaddrs = gethostbyaddr(name)
    except error:
        # Reverse lookup failed: return the input (or local hostname) as-is.
        return name
    # Prefer the first name containing a dot; fall back to the primary name.
    for candidate in [hostname] + aliases:
        if '.' in candidate:
            return candidate
    return hostname
# Sentinel meaning "use the module-level default timeout"; a unique object
# so that None (blocking mode) remains a valid explicit timeout value.
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.
    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    err = None
    # Try every resolved address until one connect() succeeds.
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            # Break explicitly a reference cycle
            err = None
            return sock
        except error as _:
            err = _
            if sock is not None:
                sock.close()
    if err is not None:
        # Re-raise the last connection error seen.
        try:
            raise err
        finally:
            # Break explicitly a reference cycle
            err = None
    else:
        raise error("getaddrinfo returns an empty list")
def has_dualstack_ipv6():
    """Return True if the platform supports creating a SOCK_STREAM socket
    which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections.
    """
    # Guard clause: all three capabilities must be present at all.
    if not (has_ipv6
            and hasattr(_socket, 'IPPROTO_IPV6')
            and hasattr(_socket, 'IPV6_V6ONLY')):
        return False
    # Probe by actually toggling IPV6_V6ONLY off on a scratch socket.
    try:
        with socket(AF_INET6, SOCK_STREAM) as sock:
            sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0)
            return True
    except error:
        return False
def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False,
                  dualstack_ipv6=False):
    """Convenience function which creates a SOCK_STREAM type socket
    bound to *address* (a 2-tuple (host, port)) and return the socket
    object.
    *family* should be either AF_INET or AF_INET6.
    *backlog* is the queue size passed to socket.listen().
    *reuse_port* dictates whether to use the SO_REUSEPORT socket option.
    *dualstack_ipv6*: if true and the platform supports it, it will
    create an AF_INET6 socket able to accept both IPv4 or IPv6
    connections. When false it will explicitly disable this option on
    platforms that enable it by default (e.g. Linux).
    >>> with create_server(('', 8000)) as server:
    ...     while True:
    ...         conn, addr = server.accept()
    ...         # handle new connection
    """
    # Validate option combinations up front, before any socket is created.
    if reuse_port and not hasattr(_socket, "SO_REUSEPORT"):
        raise ValueError("SO_REUSEPORT not supported on this platform")
    if dualstack_ipv6:
        if not has_dualstack_ipv6():
            raise ValueError("dualstack_ipv6 not supported on this platform")
        if family != AF_INET6:
            raise ValueError("dualstack_ipv6 requires AF_INET6 family")
    sock = socket(family, SOCK_STREAM)
    try:
        # Note about Windows. We don't set SO_REUSEADDR because:
        # 1) It's unnecessary: bind() will succeed even in case of a
        # previous closed socket on the same address and still in
        # TIME_WAIT state.
        # 2) If set, another socket is free to bind() on the same
        # address, effectively preventing this one from accepting
        # connections. Also, it may set the process in a state where
        # it'll no longer respond to any signals or graceful kills.
        # See: msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx
        if os.name not in ('nt', 'cygwin') and \
                hasattr(_socket, 'SO_REUSEADDR'):
            try:
                sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
            except error:
                # Fail later on bind(), for platforms which may not
                # support this option.
                pass
        if reuse_port:
            sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
        if has_ipv6 and family == AF_INET6:
            # Explicitly set V6ONLY: off for dual-stack, on otherwise, so
            # behavior does not depend on the platform default.
            if dualstack_ipv6:
                sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0)
            elif hasattr(_socket, "IPV6_V6ONLY") and \
                    hasattr(_socket, "IPPROTO_IPV6"):
                sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1)
        try:
            sock.bind(address)
        except error as err:
            # Enrich the bind error with the address for easier diagnosis.
            msg = '%s (while attempting to bind on address %r)' % \
                (err.strerror, address)
            raise error(err.errno, msg) from None
        if backlog is None:
            sock.listen()
        else:
            sock.listen(backlog)
        return sock
    except error:
        # Don't leak the fd if any setup step failed.
        sock.close()
        raise
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
    """Resolve host and port into list of address info entries.
    Translate the host/port argument into a sequence of 5-tuples that contain
    all the necessary arguments for creating a socket connected to that service.
    host is a domain name, a string representation of an IPv4/v6 address or
    None. port is a string service name such as 'http', a numeric port number or
    None. By passing None as the value of host and port, you can pass NULL to
    the underlying C API.
    The family, type and proto arguments can be optionally specified in order to
    narrow the list of addresses returned. Passing zero as a value for each of
    these arguments selects the full range of results.
    """
    # We override this function since we want to translate the numeric family
    # and socket type values to enum constants.
    return [(_intenum_converter(af, AddressFamily),
             _intenum_converter(kind, SocketKind),
             prt, canonname, sa)
            for af, kind, prt, canonname, sa
            in _socket.getaddrinfo(host, port, family, type, proto, flags)]
| |
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import mshr
from dolfin import *
import sympy as sy
import numpy as np
import ExactSol
import MatrixOperations as MO
import CheckPetsc4py as CP
from dolfin import __version__
def Domain(n):
    """Build an L-shaped mesh on [-1,1]^2 minus one quadrant, with boundary
    markers: 1 on the outer edges, 2 on the two re-entrant corner edges.

    Returns (mesh, boundaries, domains) where boundaries is a facet marker
    function and domains a (currently all-zero) cell marker function.
    """
    # defining the L-shaped domain
    # domain = mshr.Rectangle(Point(-1., -1.), Point(1., 1.)) - mshr.Rectangle(Point(0., -1.), Point(1., 0.) )
    # mesh = mshr.generate_mesh(domain, n)
    # RectangleMesh signature changed in DOLFIN 1.6; branch on the version.
    if __version__ == '1.6.0':
        mesh = RectangleMesh(Point(-1., -1.), Point(1., 1.),n,n)
    else:
        mesh = RectangleMesh(-1,-1,1,1,n,n, 'left')
    cell_f = CellFunction('size_t', mesh, 0)
    # Mark (with 1) the cells of the quadrant to cut out, then keep the
    # complement via SubMesh.
    for cell in cells(mesh):
        # get_vertex_coordinates() returns [x0, y0, x1, y1, x2, y2].
        v = cell.get_vertex_coordinates()
        # NOTE(review): 'y' takes the even-index coords and 'x' the odd-index
        # ones, which looks swapped relative to the (x, y) interleaving above;
        # confirm this is intentional (the domain is symmetric under swap of
        # which quadrant is removed only up to reflection).
        y = v[np.arange(0,6,2)]
        x = v[np.arange(1,6,2)]
        xone = np.ones(3)
        xone[x > 0] = 0
        yone = np.ones(3)
        yone[y < 0] = 0
        # All three vertices satisfy x <= 0 or y >= 0 except possibly
        # borderline ones: sum > 5.5 means the cell lies in the removed corner.
        if np.sum(xone)+ np.sum(yone)>5.5:
            cell_f[cell] = 1
    mesh = SubMesh(mesh, cell_f, 0)
    # cell_markers = CellFunction("bool", mesh)
    # cell_markers.set_all(False)
    # origin = Point(0., 0.)
    # for cell in cells(mesh):
    #     p = cell.midpoint()
    #     if abs(p.distance(origin)) < 0.6:
    #         cell_markers[cell] = True
    # mesh = refine(mesh, cell_markers)
    # cell_markers = CellFunction("bool", mesh)
    # cell_markers.set_all(False)
    # origin = Point(0., 0.)
    # for cell in cells(mesh):
    #     p = cell.midpoint()
    #     if abs(p.distance(origin)) < 0.4:
    #         cell_markers[cell] = True
    # mesh = refine(mesh, cell_markers)
    # cell_markers = CellFunction("bool", mesh)
    # cell_markers.set_all(False)
    # origin = Point(0., 0.)
    # for cell in cells(mesh):
    #     p = cell.midpoint()
    #     if abs(p.distance(origin)) < 0.2:
    #         cell_markers[cell] = True
    # mesh = refine(mesh, cell_markers)
    # Creating classes that define the boundary of the domain
    class Left(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], -1.0)
    class Right(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], 1.0)
    class Bottom(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], -1.0)
    class Top(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], 1.0)
    # The two edges meeting at the re-entrant corner of the L.
    class CornerTop(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], 0.0) and between(x[0], (0.0,1.0))
    class CornerLeft(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], 0.0) and between(x[1], (-1.0,0.0))
    left = Left()
    top = Top()
    right = Right()
    bottom = Bottom()
    cleft = CornerLeft()
    ctop = CornerTop()
    # Initialize mesh function for the domain
    domains = CellFunction("size_t", mesh)
    domains.set_all(0)
    # Initialize mesh function for boundary domains
    boundaries = FacetFunction("size_t", mesh)
    boundaries.set_all(0)
    # Outer boundary -> marker 1; re-entrant corner edges -> marker 2.
    left.mark(boundaries, 1)
    top.mark(boundaries, 1)
    bottom.mark(boundaries, 1)
    right.mark(boundaries, 1)
    cleft.mark(boundaries, 2)
    ctop.mark(boundaries, 2)
    return mesh, boundaries, domains
# Cartesian derivatives of a function given in polar coordinates (rho, phi),
# via the chain rule.
def polarx(u, rho, phi):
    """Return du/dx for u(rho, phi) expressed in polar coordinates."""
    du_drho = sy.diff(u, rho)
    du_dphi = sy.diff(u, phi)
    return sy.cos(phi)*du_drho - (1./rho)*sy.sin(phi)*du_dphi
def polary(u, rho, phi):
    """Return du/dy for u(rho, phi) expressed in polar coordinates."""
    du_drho = sy.diff(u, rho)
    du_dphi = sy.diff(u, phi)
    return sy.sin(phi)*du_drho + (1./rho)*sy.cos(phi)*du_dphi
def polarr(u, x, y):
    """Return du/drho (radial derivative) for u(x, y) in Cartesian coords.

    Uses d/drho = (x*d/dx + y*d/dy) / sqrt(x**2 + y**2).
    Fix: use sy.sqrt explicitly — the bare `sqrt` in this module comes from
    `from dolfin import *` (UFL's sqrt), which is not meant for the sympy
    expressions this helper operates on.
    """
    return (1./sy.sqrt(x**2 + y**2))*(x*sy.diff(u,x)+y*sy.diff(u,y))
def polart(u, x, y):
    """Return du/dtheta (angular derivative) for u(x, y) in Cartesian coords."""
    du_dx = sy.diff(u, x)
    du_dy = sy.diff(u, y)
    return x*du_dy - y*du_dx
def SolutionPolar(mesh, params):
l = 0.54448373678246
omega = (3./2)*np.pi
phi = sy.symbols('x[1]')
rho = sy.symbols('x[0]')
z = sy.symbols('z')
# looked at all the exact solutions and they seems to be the same as the paper.....
psi = (sy.sin((1+l)*phi)*sy.cos(l*omega))/(1+l) - sy.cos((1+l)*phi) - (sy.sin((1-l)*phi)*sy.cos(l*omega))/(1-l) + sy.cos((1-l)*phi)
psi_prime = sy.diff(psi, phi)
psi_3prime = sy.diff(psi, phi, phi, phi)
u = rho**l*((1+l)*sy.sin(phi)*psi + sy.cos(phi)*psi_prime)
v = rho**l*(-(1+l)*sy.cos(phi)*psi + sy.sin(phi)*psi_prime)
uu0 = Expression((sy.ccode(u),sy.ccode(v)))
# sssss
p = -rho**(l-1)*((1+l)**2*psi_prime + psi_3prime)/(1-l)
pu0 = Expression(sy.ccode(p))
f = rho**(2./3)*sy.sin((2./3)*phi)
# b = sy.diff(f,rho)
b = polarx(f, rho, phi)
# d = (1./rho)*sy.diff(f,phi)
d = polary(f, rho, phi)
bu0 = Expression((sy.ccode(b),sy.ccode(d)))
r = sy.diff(phi,rho)
ru0 = Expression(sy.ccode(r))
# Defining polarx and polary as the x and y derivatives with respect to polar coordinates (rho, phi). Writing the right handside with respect to cartesian coords
#Laplacian
L1 = polarx(polarx(u, rho, phi), rho, phi) + polary(polary(u, rho, phi), rho, phi)
L2 = polarx(polarx(v, rho, phi), rho, phi) + polary(polary(v, rho, phi), rho, phi)
# Advection
A1 = u*polarx(u, rho, phi)+v*polary(u, rho, phi)
A2 = u*polarx(v, rho, phi)+v*polary(v, rho, phi)
# Pressure gradient
P1 = polarx(p, rho, phi)
P2 = polary(p, rho, phi)
# Curl-curl
C1 = polarx(polary(d, rho, phi), rho, phi) - polary(polary(b, rho, phi), rho, phi)
C2 = polarx(polary(b, rho, phi), rho, phi) - polary(polary(d, rho, phi), rho, phi)
# Multiplier gradient
R1 = sy.diff(r, rho)
R2 = sy.diff(r, rho)
# Coupling term for fluid variables
NS1 = -d*(polarx(d, rho, phi)-polary(b, rho, phi))
NS2 = b*(polarx(d, rho, phi)-polary(b, rho, phi))
# Coupling term for Magnetic variables
M1 = polary(u*d-v*b, rho, phi)
M2 = -polarx(u*d-v*b, rho, phi)
# Using https://en.wikipedia.org/wiki/Del_in_cylindrical_and_spherical_coordinates defintitions of the derivative operators (sy.diff(u,rho) means partial derivative of u with respect to rho)
# Laplacian
L11 = (1./rho)*sy.diff(rho*sy.diff(u,rho),rho) + (1./(rho**2))*sy.diff(sy.diff(u,phi),phi) - (1./rho**2)*u - (2./rho**2)*sy.diff(v, phi)
L22 = (1./rho)*sy.diff(rho*sy.diff(v,rho),rho) + (1./(rho**2))*sy.diff(sy.diff(v,phi),phi) - (1./rho**2)*v + (2./rho**2)*sy.diff(u, phi)
# Advection
A11 = u*sy.diff(u, rho) + (1./rho)*v*sy.diff(u, phi) - u**2/rho
A22 = u*sy.diff(v, rho) + (1./rho)*v*sy.diff(v, phi) + v*u/rho
# Pressure gradient
P11 = sy.diff(p, rho)
P22 = (1./rho)*sy.diff(p, phi)
# Curl-curl
c = (1./rho)*(sy.diff(rho*d, rho) - sy.diff(b, phi))
C11 = (1./rho)*sy.diff(c, phi)
C22 = -sy.diff(c, rho)
# Multiplier gradient
R11 = sy.diff(r, rho)
R22 = sy.diff(r, rho)
# Coupling term for fluid variables
NS11 = -c*d
NS22 = c*b
# Coupling term for Magnetic variables
c = u*d-v*b
M11 = (1./rho)*sy.diff(c, phi)
M22 = -sy.diff(c, rho)
FF = sy.diff(u, rho) + (1./rho)*sy.diff(v, phi)
# print "\n\n\nL limits \n\n"
# print sy.limit(L1, rho,0), sy.limit(sy.limit(L1, phi,0),rho,0)
# print sy.limit(L11, rho,0), sy.limit(sy.limit(L11, phi,0),rho,0)
# print "\n", sy.limit(L2, rho,0), sy.limit(sy.limit(L2, phi,0),rho,0)
# print sy.limit(L22, rho,0), sy.limit(sy.limit(L22, phi,0),rho,0)
# print "\n\n\nA limits \n\n"
# print sy.limit(A1, rho,0), sy.limit(sy.limit(A1, phi,0),rho,0)
# print sy.limit(A11, rho,0), sy.limit(sy.limit(A11, phi,0),rho,0)
# print "\n", sy.limit(A2, rho,0), sy.limit(sy.limit(A2, phi,0),rho,0)
# print sy.limit(A22, rho,0), sy.limit(sy.limit(A22, phi,0),rho,0)
# print "\n\n\nP limits \n\n"
# print sy.limit(P1, rho,0), sy.limit(sy.limit(P1, phi,0),rho,0)
# print sy.limit(P11, rho,0), sy.limit(sy.limit(P11, phi,0),rho,0)
# print "\n", sy.limit(P2, rho,0), sy.limit(sy.limit(P2, phi,0),rho,0)
# print sy.limit(P22, rho,0), sy.limit(sy.limit(P22, phi,0),rho,0)
# print "\n\n\nC limits \n\n"
# print sy.limit(C1, rho,0), sy.limit(sy.limit(C1, phi,0),rho,0)
# print sy.limit(C11, rho,0), sy.limit(sy.limit(C11, phi,0),rho,0)
# print "\n", sy.limit(C2, rho,0), sy.limit(sy.limit(C2, phi,0),rho,0)
# print sy.limit(C22, rho,0), sy.limit(sy.limit(C22, phi,0),rho,0)
# print "\n\n\nR limits \n\n"
# print sy.limit(R1, rho,0), sy.limit(sy.limit(R1, phi,0),rho,0)
# print sy.limit(R11, rho,0), sy.limit(sy.limit(R11, phi,0),rho,0)
# print "\n", sy.limit(R2, rho,0), sy.limit(sy.limit(R2, phi,0),rho,0)
# print sy.limit(R22, rho,0), sy.limit(sy.limit(R22, phi,0),rho,0)
# print "N\n\n\nS limits \n\n"
# print sy.limit(NS1, rho,0), sy.limit(sy.limit(NS1, phi,0),rho,0)
# print sy.limit(NS11, rho,0), sy.limit(sy.limit(NS11, phi,0),rho,0)
# print "\n", sy.limit(NS2, rho,0), sy.limit(sy.limit(NS2, phi,0),rho,0)
# print sy.limit(NS22, rho,0), sy.limit(sy.limit(NS22, phi,0),rho,0)
# print "\n\n\nM limits \n\n"
# print sy.limit(M1, rho,0), sy.limit(sy.limit(M1, phi,0),rho,0)
# print sy.limit(M11, rho,0), sy.limit(sy.limit(M11, phi,0),rho,0)
# print "\n", sy.limit(M2, rho,0), sy.limit(sy.limit(M2, phi,0),rho,0)
# print sy.limit(M22, rho,0), sy.limit(sy.limit(M22, phi,0),rho,0)
# print "\n\n\Fluid limits \n\n"
# print sy.limit(u, rho,0), sy.limit(sy.limit(u, phi,0),rho,0)
# print sy.limit(v, rho,0), sy.limit(sy.limit(v, phi,0),rho,0)
# print sy.limit(p, rho,0), sy.limit(sy.limit(p, phi,0),rho,0)
# print "\n\n\Magnetic limits \n\n"
# print sy.limit(b, rho,0), sy.limit(sy.limit(b, phi,0),rho,0)
# print sy.limit(d, rho,0), sy.limit(sy.limit(d, phi,0),rho,0)
# print sy.limit(r, rho,0), sy.limit(sy.limit(r, phi,0),rho,0)
# ssss
# graduu0 = Expression(sy.ccode(sy.diff(u, rho) + (1./rho)*sy.diff(u, phi)))
graduu0 = Expression((sy.ccode(sy.diff(u, rho)),sy.ccode(sy.diff(v, rho))))
Laplacian = Expression((sy.ccode(L11),sy.ccode(L22)))
Advection = Expression((sy.ccode(A11),sy.ccode(A22)))
gradPres = Expression((sy.ccode(P11),sy.ccode(P22)))
CurlCurl = Expression((sy.ccode(C11),sy.ccode(C22)))
gradR = Expression((sy.ccode(R11).replace('M_PI','pi'),sy.ccode(R22).replace('M_PI','pi')))
NS_Couple = Expression((sy.ccode(NS11),sy.ccode(NS22)))
M_Couple = Expression((sy.ccode(M11),sy.ccode(M22)))
# ignore this! Just removes the singularity (atan2(0,0) = NaN) and makes all functions zero at the origin
class u0(Expression):
def __init__(self, mesh, uu0):
self.mesh = mesh
self.u0 = uu0
def eval_cell(self, values, x, ufc_cell):
if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
values[0] = 0.0
values[1] = 0.0
else:
r = sqrt(x[0]**2 + x[1]**2)
theta = np.arctan2(x[1],x[0])
# print theta
if theta < 0:
theta += 2*np.pi
values[0] = self.u0(r, theta)[0]
values[1] = self.u0(r,theta)[1]
def value_shape(self):
return (2,)
class gradu0(Expression):
def __init__(self, mesh, graduu0):
self.mesh = mesh
self.gradu0 = graduu0
def eval_cell(self, values, x, ufc_cell):
# if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
# values = 0.0
# else:
r = sqrt(x[0]**2 + x[1]**2)
theta = np.arctan2(x[1],x[0])
if theta < 0:
theta += 2*np.pi
values = self.gradu0(r,theta)
class p0(Expression):
def __init__(self, mesh, pu0):
self.mesh = mesh
self.p0 = pu0
def eval_cell(self, values, x, ufc_cell):
if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
values[0] = 0.0
else:
r = sqrt(x[0]**2 + x[1]**2)
theta = np.arctan2(x[1],x[0])
if theta < 0:
theta += 2*np.pi
values[0] = self.p0(r,theta)
def eval(self, values, x):
# if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
# values = 0.0
# else:
r = sqrt(x[0]**2 + x[1]**2)
theta = np.arctan2(x[1],x[0])
if theta < 0:
theta += 2*np.pi
values[0] = self.p0(r,theta)
class p0Vec(Expression):
def __init__(self, mesh, pu0):
self.mesh = mesh
self.p0 = pu0
def eval_cell(self, values, x, ufc_cell):
# if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
# values[0] = 0.0
# else:
r = sqrt(x[0]**2 + x[1]**2)
theta = np.arctan2(x[1],x[0])
if theta < 0:
theta += 2*np.pi
values[0] = self.p0(r,theta)
values[1] = self.p0(r,theta)
def eval(self, values, x):
# if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
# values = 0.0
# else:
r = sqrt(x[0]**2 + x[1]**2)
theta = np.arctan2(x[1],x[0])
if theta < 0:
theta += 2*np.pi
values[0] = self.p0(r,theta)
values[1] = self.p0(r,theta)
# def value_shape(self):
# return (1,)
class b0(Expression):
def __init__(self, mesh, bu0):
self.mesh = mesh
self.b0 = bu0
def eval_cell(self, values, x, ufc_cell):
if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
values[0] = 0.0
values[1] = 0.0
else:
r = sqrt(x[0]**2 + x[1]**2)
theta = np.arctan2(x[1],x[0])
if theta < 0:
theta += 2*np.pi
values[0] = self.b0(r, theta)[0]
values[1] = self.b0(r,theta)[1]
# print values
def value_shape(self):
return (2,)
    class r0(Expression):
        # Constant scalar Expression: always evaluates to 1.0
        # (used as boundary data for the multiplier field).
        def __init__(self, mesh, element=None):
            self.mesh = mesh
        def eval(self, values, x):
            values[0] = 1.0
        # def value_shape(self):
        #     return ( )
    class F_NS(Expression):
        # Navier-Stokes forcing term.  Only Advection and NS_Couple are used
        # in eval_cell; Laplacian and gradPres are stored but unused here.
        def __init__(self, mesh, Laplacian, Advection, gradPres, NS_Couple, params):
            self.mesh = mesh
            self.Laplacian = Laplacian
            self.Advection = Advection
            self.gradPres = gradPres
            self.NS_Couple = NS_Couple
            self.params = params  # params[0] scales the coupling term
        def eval_cell(self, values, x, ufc_cell):
            if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
                values[0] = 0.0
                values[1] = 0.0
            else:
                r = sqrt(x[0]**2 + x[1]**2)
                theta = np.arctan2(x[1],x[0])
                if theta < 0:
                    theta += 2*np.pi
                # F = advection - params[0] * NS coupling (per component).
                values[0] = self.Advection(r,theta)[0] - self.params[0]*self.NS_Couple(r,theta)[0]
                values[1] = self.Advection(r,theta)[1] - self.params[0]*self.NS_Couple(r,theta)[1]
                # print values
        def value_shape(self):
            return (2,)
    class F_S(Expression):
        # Stokes forcing term: identically zero.  Laplacian and gradPres
        # are stored but not used in eval_cell.
        def __init__(self, mesh, Laplacian, gradPres, params):
            self.mesh = mesh
            self.Laplacian = Laplacian
            self.gradPres = gradPres
            self.params = params
        def eval_cell(self, values, x, ufc_cell):
            values[0] = 0
            values[1] = 0
            # print r, theta, self.Laplacian(r,theta)
        def value_shape(self):
            return (2,)
# params[1]*params[0]*CurlCurl+gradR -params[0]*M_Couple
    class F_M(Expression):
        # Maxwell forcing term.  Only M_Couple is used in eval_cell;
        # CurlCurl and gradR are stored but unused here.
        def __init__(self, mesh, CurlCurl, gradR ,M_Couple, params):
            self.mesh = mesh
            self.CurlCurl = CurlCurl
            self.gradR = gradR
            self.M_Couple = M_Couple
            self.params = params  # params[0] scales the coupling term
        def eval_cell(self, values, x, ufc_cell):
            if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
                values[0] = 0.0
                values[1] = 0.0
            else:
                r = sqrt(x[0]**2 + x[1]**2)
                theta = np.arctan2(x[1],x[0])
                if theta < 0:
                    theta += 2*np.pi
                values[0] = - self.params[0]*self.M_Couple(r,theta)[0]
                values[1] = - self.params[0]*self.M_Couple(r,theta)[1]
        def value_shape(self):
            return (2,)
    class F_MX(Expression):
        # Zero 2-vector forcing term (multiplier equation).
        def __init__(self, mesh):
            self.mesh = mesh
        def eval_cell(self, values, x, ufc_cell):
            values[0] = 0.0
            values[1] = 0.0
        def value_shape(self):
            return (2,)
class Neumann(Expression):
def __init__(self, mesh, pu0, graduu0, params, n):
self.mesh = mesh
self.p0 = pu0
self.gradu0 = graduu0
self.params = params
self.n = n
def eval_cell(self, values, x, ufc_cell):
if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
values[0] = 2.0
values[1] = 0.0
else:
# print x[0], x[1]
r = sqrt(x[0]**2 + x[1]**2)
theta = np.arctan2(x[1],x[0])
if theta < 0:
theta += 2*np.pi
# cell = Cell(self.mesh, ufc_cell.index)
# print ufc_cell
# n = cell.normal(ufc_cell.local_facet)
# n = FacetNormal(self.mesh)
# print self.n
# sss
values[0] = (self.p0(r,theta) - self.params[0]*self.gradu0(r,theta)[021])
# print -(self.p0(r,theta) - self.params[0]*self.gradu0(r,theta))
values[1] = -(self.params[0]*self.gradu0(r,theta)[1])
def value_shape(self):
return (2,)
# class NeumannGrad(Expression):
# def __init__(self, mesh, pu0, graduu0, params, n):
# self.mesh = mesh
# self.p0 = pu0
# self.gradu0 = graduu0
# self.params = params
# self.n = n
# def eval_cell(self, values, x, ufc_cell):
# if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
# values[0] = 2.0
# values[1] = 0.0
# else:
# # print x[0], x[1]
# r = sqrt(x[0]**2 + x[1]**2)
# theta = np.arctan2(x[1],x[0])
# if theta < 0:
# theta += 2*np.pi
# # cell = Cell(self.mesh, ufc_cell.index)
# # print ufc_cell
# # n = cell.normal(ufc_cell.local_facet)
# # n = FacetNormal(self.mesh)
# # print self.n
# # sss
# values[0] = -(self.p0(r,theta) - self.params[0]*self.gradu0(r,theta)[0])
# # print -(self.p0(r,theta) - self.params[0]*self.gradu0(r,theta))
# values[1] = (self.params[0]*self.gradu0(r,theta)[1])
# def value_shape(self):
# return (2,)
    # Instantiate the wrapper Expressions defined above, shadowing the class
    # names with the instances that get returned.
    u0 = u0(mesh, uu0)
    p0 = p0(mesh, pu0)
    p0vec = p0Vec(mesh, pu0)
    b0 = b0(mesh, bu0)
    r0 = r0(mesh)
    F_NS = F_NS(mesh, Laplacian, Advection, gradPres, NS_Couple, params)
    F_M = F_M(mesh, CurlCurl, gradR, M_Couple, params)
    F_MX = F_MX(mesh)
    F_S = F_S(mesh, Laplacian, gradPres, params)
    gradu0 = gradu0(mesh, graduu0)
    Neumann = Neumann(mesh, pu0, graduu0, params, FacetNormal(mesh))
    # NeumannGrad = NeumannGrad(mesh, p0, graduu0, params, FacetNormal(mesh))
    return u0, p0, b0, r0, F_NS, F_M, F_MX, F_S, gradu0, Neumann, p0vec
def ReadInSol():
tic()
velocity = open('Solution/velocity.t','r')
magnetic = open('Solution/magnetic.t','r')
pressure = open('Solution/pressure.t','r')
advection = open('Solution/advection.t','r')
laplacian = open('Solution/laplacian.t','r')
pressureGrad = open('Solution/pressureGrad.t','r')
curlcurl = open('Solution/curlcurl.t','r')
Mcouple = open('Solution/Mcouple.t','r')
NScouple = open('Solution/NScouple.t','r')
u = velocity.readline()
v = velocity.readline()
b = magnetic.readline()
d = magnetic.readline()
p = pressure.readline()
L1 = laplacian.readline()
L2 = laplacian.readline()
A1 = advection.readline()
A2 = advection.readline()
P1 = pressureGrad.readline()
P2 = pressureGrad.readline()
C1 = curlcurl.readline()
C2 = curlcurl.readline()
M1 = Mcouple.readline()
M2 = Mcouple.readline()
NS1 = NScouple.readline()
NS2 = NScouple.readline()
uu0 = Expression(((u),(v)))
ub0 = Expression((str((u)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)'),str((v)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)')))
pu0 = Expression((p))
pb0 = Expression(str((p)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)'))
bu0 = Expression(((b),(d)))
bb0 = Expression((str((b)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)'),str((d)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)')))
ru0 = Expression('0.0')
Laplacian = Expression(((L1),(L2)))
Advection = Expression(((A1),(A2)))
gradPres = Expression(((P1),(P2)))
CurlCurl = Expression(((C1),(C2)))
gradR = Expression(('0.0','0.0'))
NS_Couple = Expression(((NS1),(NS2)))
M_Couple = Expression(((M1),(M2)))
print ' ', toc()
ssss
return uu0, ub0, pu0, pb0, bu0, bb0, ru0, Laplacian, Advection, gradPres, CurlCurl, gradR, NS_Couple, M_Couple
def SolutionSetUp():
    """Symbolically build the singular exact MHD solution on the L-shaped
    domain (sympy), convert it to C code, and wrap it in DOLFIN Expressions.

    Returns:
        (uu0, ub0, pu0, pb0, bu0, bb0, ru0, Laplacian, Advection,
         gradPres, CurlCurl, gradR, NS_Couple, M_Couple)
    where the *b0 variants shift atan2 by 2*pi (branch-cut side).
    """
    tic()
    l = 0.54448373678246      # singular exponent for the corner solution
    omega = (3./2)*np.pi      # re-entrant corner angle
    z = sy.symbols('z')
    x = sy.symbols('x[0]')
    y = sy.symbols('x[1]')
    rho = sy.sqrt(x**2 + y**2)
    phi = sy.atan2(y,x)
    # looked at all the exact solutions and they seems to be the same as the paper.....
    psi = (sy.sin((1+l)*phi)*sy.cos(l*omega))/(1+l) - sy.cos((1+l)*phi) - (sy.sin((1-l)*phi)*sy.cos(l*omega))/(1-l) + sy.cos((1-l)*phi)
    psi_prime = polart(psi, x, y)
    psi_3prime = polart(polart(psi_prime, x, y), x, y)
    # Velocity components from the stream function psi.
    u = rho**l*((1+l)*sy.sin(phi)*psi + sy.cos(phi)*psi_prime)
    v = rho**l*(-(1+l)*sy.cos(phi)*psi + sy.sin(phi)*psi_prime)
    uu0 = Expression((sy.ccode(u),sy.ccode(v)))
    ub0 = Expression((str(sy.ccode(u)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)'),str(sy.ccode(v)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)')))
    p = -rho**(l-1)*((1+l)**2*psi_prime + psi_3prime)/(1-l)
    pu0 = Expression(sy.ccode(p))
    pb0 = Expression(str(sy.ccode(p)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)'))
    # Magnetic field as the gradient of the scalar potential f.
    f = rho**(2./3)*sy.sin((2./3)*phi)
    b = sy.diff(f,x)
    d = sy.diff(f,y)
    bu0 = Expression((sy.ccode(b),sy.ccode(d)))
    bb0 = Expression((str(sy.ccode(b)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)'),str(sy.ccode(d)).replace('atan2(x[1], x[0])','(atan2(x[1], x[0])+2*pi)')))
    ru0 = Expression('0.0')
    #Laplacian
    L1 = sy.diff(u,x,x)+sy.diff(u,y,y)
    L2 = sy.diff(v,x,x)+sy.diff(v,y,y)
    # Advection (u . grad)u
    A1 = u*sy.diff(u,x)+v*sy.diff(u,y)
    A2 = u*sy.diff(v,x)+v*sy.diff(v,y)
    # Pressure gradient
    P1 = sy.diff(p,x)
    P2 = sy.diff(p,y)
    # Curl-curl
    C1 = sy.diff(d,x,y) - sy.diff(b,y,y)
    C2 = sy.diff(b,x,y) - sy.diff(d,x,x)
    # Coupling terms between the fluid and magnetic equations.
    NS1 = -d*(sy.diff(d,x)-sy.diff(b,y))
    NS2 = b*(sy.diff(d,x)-sy.diff(b,y))
    M1 = sy.diff(u*d-v*b,y)
    M2 = -sy.diff(u*d-v*b,x)
    print ' ', toc()
    # graduu0 = Expression(sy.ccode(sy.diff(u, rho) + (1./rho)*sy.diff(u, phi)))
    # graduu0 = Expression((sy.ccode(sy.diff(u, rho)),sy.ccode(sy.diff(v, rho))))
    tic()
    Laplacian = Expression((sy.ccode(L1),sy.ccode(L2)))
    Advection = Expression((sy.ccode(A1),sy.ccode(A2)))
    gradPres = Expression((sy.ccode(P1),sy.ccode(P2)))
    CurlCurl = Expression((sy.ccode(C1),sy.ccode(C2)))
    gradR = Expression(('0.0','0.0'))
    NS_Couple = Expression((sy.ccode(NS1),sy.ccode(NS2)))
    M_Couple = Expression((sy.ccode(M1),sy.ccode(M2)))
    print ' ', toc()
    return uu0, ub0, pu0, pb0, bu0, bb0, ru0, Laplacian, Advection, gradPres, CurlCurl, gradR, NS_Couple, M_Couple
def SolutionMeshSetup(mesh, params,uu0, ub0, pu0, pb0, bu0, bb0, ru0, Laplacian, Advection, gradPres, CurlCurl, gradR, NS_Couple, M_Couple):
    """Wrap the symbolic solution Expressions in mesh-aware classes that
    pick the correct atan2 branch: for x[1] < 0 the shifted (*b0)
    variants are used, otherwise the unshifted ones.

    Returns (u0, p0, b0, r0, F_NS, F_M, F_MX, F_S, 1, 1, 1, bNone);
    the three literal 1s fill slots unused by callers of this variant.
    """
    class u0(Expression):
        # Exact velocity: branch-selected between uu0 and ub0.
        def __init__(self, mesh, uu0, ub0):
            self.mesh = mesh
            self.u0 = uu0   # branch for x[1] >= 0
            self.b0 = ub0   # shifted branch for x[1] < 0
        def eval_cell(self, values, x, ufc_cell):
            if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
                values[0] = 0.0
                values[1] = 0.0
            else:
                if x[1] < 0:
                    values[0] = self.b0(x[0], x[1])[0]
                    values[1] = self.b0(x[0], x[1])[1]
                else:
                    values[0] = self.u0(x[0], x[1])[0]
                    values[1] = self.u0(x[0], x[1])[1]
        def value_shape(self):
            return (2,)
    class p0(Expression):
        # Exact pressure: branch-selected between pu0 and pb0.
        def __init__(self, mesh, pu0, pb0):
            self.mesh = mesh
            self.p0 = pu0   # branch for x[1] >= 0
            self.b0 = pb0   # shifted branch for x[1] < 0
        def eval_cell(self, values, x, ufc_cell):
            if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
                values[0] = 0.0
            else:
                if x[1] < 0:
                    values[0] = self.b0(x[0], x[1])
                else:
                    values[0] = self.p0(x[0], x[1])
    class b0(Expression):
        # Exact magnetic field: branch-selected between bu0 and bb0.
        def __init__(self, mesh, bu0, bb0):
            self.mesh = mesh
            self.b0 = bu0    # branch for x[1] >= 0
            self.bb0 = bb0   # shifted branch for x[1] < 0
        def eval_cell(self, values, x, ufc_cell):
            if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
                values[0] = 0.0
                values[1] = 0.0
            else:
                if x[1] < 0:
                    values[0] = self.bb0(x[0], x[1])[0]
                    values[1] = self.bb0(x[0], x[1])[1]
                else:
                    values[0] = self.b0(x[0], x[1])[0]
                    values[1] = self.b0(x[0], x[1])[1]
                # print values
        def value_shape(self):
            return (2,)
    class bNone(Expression):
        # Piecewise-constant unit vector: (1, 0) for x[1] < 0, (0, 1)
        # otherwise, zero at the origin.  bu0/bb0 are stored but unused.
        def __init__(self, mesh, bu0, bb0):
            self.mesh = mesh
            self.b0 = bu0
            self.bb0 = bb0
        def eval_cell(self, values, x, ufc_cell):
            if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
                values[0] = 0.0
                values[1] = 0.0
            else:
                if x[1] < 0:
                    values[0] = 1.
                    values[1] = 0.0
                else:
                    values[0] = 0.0
                    values[1] = 1.
                # print values
        def value_shape(self):
            return (2,)
        # NOTE(review): duplicate value_shape definition below; the later
        # definition simply rebinds the same method -- harmless but dead code.
        def value_shape(self):
            return (2,)
    class r0(Expression):
        # Constant scalar Expression: always evaluates to 1.0.
        def __init__(self, mesh, element=None):
            self.mesh = mesh
        def eval(self, values, x):
            values[0] = 1.0
        # def value_shape(self):
        #     return ( )
    class F_NS(Expression):
        # Navier-Stokes forcing term.  Only Advection and NS_Couple are used
        # in eval_cell; Laplacian and gradPres are stored but unused here.
        def __init__(self, mesh, Laplacian, Advection, gradPres, NS_Couple, params):
            self.mesh = mesh
            self.Laplacian = Laplacian
            self.Advection = Advection
            self.gradPres = gradPres
            self.NS_Couple = NS_Couple
            self.params = params  # params[0] scales the coupling term
        def eval_cell(self, values, x, ufc_cell):
            if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
                values[0] = 0.0
                values[1] = 0.0
            else:
                r = sqrt(x[0]**2 + x[1]**2)
                theta = np.arctan2(x[1],x[0])
                if theta < 0:
                    theta += 2*np.pi
                # F = advection - params[0] * NS coupling (per component).
                values[0] = self.Advection(r,theta)[0] - self.params[0]*self.NS_Couple(r,theta)[0]
                values[1] = self.Advection(r,theta)[1] - self.params[0]*self.NS_Couple(r,theta)[1]
                # print values
        def value_shape(self):
            return (2,)
    class F_S(Expression):
        # Stokes forcing term: identically zero.  Laplacian and gradPres
        # are stored but not used in eval_cell.
        def __init__(self, mesh, Laplacian, gradPres, params):
            self.mesh = mesh
            self.Laplacian = Laplacian
            self.gradPres = gradPres
            self.params = params
        def eval_cell(self, values, x, ufc_cell):
            values[0] = 0
            values[1] = 0
            # print r, theta, self.Laplacian(r,theta)
        def value_shape(self):
            return (2,)
# params[1]*params[0]*CurlCurl+gradR -params[0]*M_Couple
    class F_M(Expression):
        # Maxwell forcing term.  Only M_Couple is used in eval_cell;
        # CurlCurl and gradR are stored but unused here.
        def __init__(self, mesh, CurlCurl, gradR ,M_Couple, params):
            self.mesh = mesh
            self.CurlCurl = CurlCurl
            self.gradR = gradR
            self.M_Couple = M_Couple
            self.params = params  # params[0] scales the coupling term
        def eval_cell(self, values, x, ufc_cell):
            if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
                values[0] = 0.0
                values[1] = 0.0
            else:
                r = sqrt(x[0]**2 + x[1]**2)
                theta = np.arctan2(x[1],x[0])
                if theta < 0:
                    theta += 2*np.pi
                values[0] = - self.params[0]*self.M_Couple(r,theta)[0]
                values[1] = - self.params[0]*self.M_Couple(r,theta)[1]
        def value_shape(self):
            return (2,)
    class F_MX(Expression):
        # Zero 2-vector forcing term (multiplier equation).
        def __init__(self, mesh):
            self.mesh = mesh
        def eval_cell(self, values, x, ufc_cell):
            values[0] = 0.0
            values[1] = 0.0
        def value_shape(self):
            return (2,)
class Neumann(Expression):
def __init__(self, mesh, pu0, graduu0, params, n):
self.mesh = mesh
self.p0 = pu0
self.gradu0 = graduu0
self.params = params
self.n = n
def eval_cell(self, values, x, ufc_cell):
if abs(x[0]) < 1e-3 and abs(x[1]) < 1e-3:
values[0] = 2.0
values[1] = 0.0
else:
# print x[0], x[1]
r = sqrt(x[0]**2 + x[1]**2)
theta = np.arctan2(x[1],x[0])
if theta < 0:
theta += 2*np.pi
# cell = Cell(self.mesh, ufc_cell.index)
# print ufc_cell
# n = cell.normal(ufc_cell.local_facet)
# n = FacetNormal(self.mesh)
# print self.n
# sss
values[0] = (self.p0(r,theta) - self.params[0]*self.gradu0(r,theta)[021])
# print -(self.p0(r,theta) - self.params[0]*self.gradu0(r,theta))
values[1] = -(self.params[0]*self.gradu0(r,theta)[1])
def value_shape(self):
return (2,)
    # Instantiate the branch-aware wrappers, shadowing the class names with
    # the instances that get returned.
    u0 = u0(mesh, uu0, ub0)
    p0 = p0(mesh, pu0, pb0)
    bNone = bNone(mesh, bu0, bb0)
    # p0vec = p0Vec(mesh, pu0)
    b0 = b0(mesh, bu0, bb0)
    r0 = r0(mesh)
    F_NS = F_NS(mesh, Laplacian, Advection, gradPres, NS_Couple, params)
    F_M = F_M(mesh, CurlCurl, gradR, M_Couple, params)
    F_MX = F_MX(mesh)
    F_S = F_S(mesh, Laplacian, gradPres, params)
    # gradu0 = gradu0(mesh, graduu0)
    # Neumann = Neumann(mesh, pu0, graduu0, params, FacetNormal(mesh))
    # NeumannGrad = NeumannGrad(mesh, p0, graduu0, params, FacetNormal(mesh))
    # Placeholder 1s fill the gradu0/Neumann/p0vec slots of the sibling
    # SolutionMeshSetup-style return signature.
    return u0, p0, b0, r0, F_NS, F_M, F_MX, F_S, 1, 1, 1, bNone
# Sets up the initial guess for the MHD problem
def Stokes(V, Q, F, u0, p0, gradu0, params,boundaries, domains):
    """Solve a Stokes problem with a direct (LU/MUMPS) PETSc solve.

    Args:
        V, Q: velocity and pressure function spaces.
        F: body-force expression.
        u0: Dirichlet velocity data applied on the whole boundary.
        p0, gradu0: exact pressure / gradient (unused here except via the
            commented-out Neumann term).
        params: params[2] scales the viscous term.
        boundaries, domains: mesh markers (boundaries feeds the ds measure).

    Returns:
        (u_k, p_k): velocity and zero-mean pressure Functions.
    """
    parameters['reorder_dofs_serial'] = False
    W = V*Q
    IS = MO.IndexSet(W)
    mesh = W.mesh()
    ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
    dx = Measure('dx', domain=mesh)
    (u, p) = TrialFunctions(W)
    (v, q) = TestFunctions(W)
    n = FacetNormal(W.mesh())
    # Standard Stokes saddle-point forms.
    a11 = params[2]*inner(grad(v), grad(u))*dx('everywhere')
    a12 = -div(v)*p*dx('everywhere')
    a21 = -div(u)*q*dx('everywhere')
    a = a11+a12+a21
    L = inner(v, F)*dx('everywhere') #+ inner(gradu0,v)*ds(2)
    def boundary(x, on_boundary):
        return on_boundary
    bcu = DirichletBC(W.sub(0), u0, boundary)
    A, b = assemble_system(a, L, bcu)
    A, b = CP.Assemble(A, b)
    u = b.duplicate()
    # Direct solve via PETSc LU (MUMPS when available).
    ksp = PETSc.KSP()
    ksp.create(comm=PETSc.COMM_WORLD)
    pc = ksp.getPC()
    ksp.setType('preonly')
    pc.setType('lu')
    OptDB = PETSc.Options()
    if __version__ != '1.6.0':
        OptDB['pc_factor_mat_solver_package'] = "mumps"
    OptDB['pc_factor_mat_ordering_type'] = "rcm"
    ksp.setFromOptions()
    # Scale the RHS to unit norm for the solve, then undo afterwards.
    scale = b.norm()
    b = b/scale
    ksp.setOperators(A,A)
    del A
    ksp.solve(b,u)
    u = u*scale
    u_k = Function(V)
    p_k = Function(Q)
    u_k.vector()[:] = u.getSubVector(IS[0]).array
    p_k.vector()[:] = u.getSubVector(IS[1]).array
    # Normalise the pressure to zero mean (it is only defined up to a constant).
    ones = Function(Q)
    ones.vector()[:]=(0*ones.vector().array()+1)
    p_k.vector()[:] += -assemble(p_k*dx('everywhere'))/assemble(ones*dx('everywhere'))
    return u_k, p_k
def Maxwell(V, Q, F, b0, r0, params, boundaries, bNone):
    """Solve a mixed Maxwell (curl-curl + multiplier) problem with a
    direct (LU/MUMPS) PETSc solve.

    Args:
        V, Q: magnetic-field and multiplier function spaces.
        F: source expression.
        b0: Dirichlet magnetic data on marker 1.
        r0: Dirichlet multiplier data on the whole boundary.
        params: params[1]*params[2] scales the curl-curl term.
        boundaries: boundary markers (1 and 2 used below).
        bNone: alternative Dirichlet data (only used by the unused bcb3).

    Returns:
        (b_k, r_k): magnetic field and multiplier Functions.
    """
    parameters['reorder_dofs_serial'] = False
    W = V*Q
    IS = MO.IndexSet(W)
    print params
    (b, r) = TrialFunctions(W)
    (c, s) = TestFunctions(W)
    a11 = params[1]*params[2]*inner(curl(b), curl(c))*dx('everywhere')
    a21 = inner(b,grad(s))*dx('everywhere')
    a12 = inner(c,grad(r))*dx('everywhere')
    L = inner(c, F)*dx('everywhere')
    a = a11+a12+a21
    def boundary(x, on_boundary):
        return on_boundary
    bcb1 = DirichletBC(W.sub(0), b0, boundaries,1)
    bcb2 = DirichletBC(W.sub(0), Expression(("0.0","0.0")), boundaries,2)
    # NOTE(review): bcb3 and bcb4 are alternative BCs that are never added
    # to `bc` below -- kept for experimentation, currently dead.
    bcb3 = DirichletBC(W.sub(0), bNone, boundaries,2)
    bcb4 = DirichletBC(W.sub(0), b0, boundaries,2)
    bcr = DirichletBC(W.sub(1), r0, boundary)
    bc = [bcb1,bcb2, bcr]
    A, b = assemble_system(a, L, bc)
    A, b = CP.Assemble(A, b)
    u = b.duplicate()
    # Direct solve via PETSc LU (MUMPS when available).
    ksp = PETSc.KSP()
    ksp.create(comm=PETSc.COMM_WORLD)
    pc = ksp.getPC()
    ksp.setType('preonly')
    pc.setType('lu')
    OptDB = PETSc.Options()
    if __version__ != '1.6.0':
        OptDB['pc_factor_mat_solver_package'] = "mumps"
    OptDB['pc_factor_mat_ordering_type'] = "rcm"
    ksp.setFromOptions()
    # Scale the RHS to unit norm for the solve, then undo afterwards.
    scale = b.norm()
    b = b/scale
    ksp.setOperators(A,A)
    del A
    ksp.solve(b,u)
    u = u*scale
    b_k = Function(V)
    r_k = Function(Q)
    b_k.vector()[:] = u.getSubVector(IS[0]).array
    r_k.vector()[:] = u.getSubVector(IS[1]).array
    return b_k, r_k
| |
from django.contrib.auth.models import User
from djblets.webapi.testing.decorators import webapi_test_template
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
class BaseCommentListMixin(object):
    # Shared POST tests for comment list resources.  The docstrings below
    # are consumed by @webapi_test_template (which substitutes "<URL>"),
    # so they must not be reworded.
    @webapi_test_template
    def test_post_with_text_type_markdown(self):
        """Testing the POST <URL> API with text_type=markdown"""
        self._test_post_with_text_type('markdown')

    @webapi_test_template
    def test_post_with_text_type_plain(self):
        """Testing the POST <URL> API with text_type=plain"""
        self._test_post_with_text_type('plain')

    def _test_post_with_text_type(self, text_type):
        # POST a comment with an explicit text_type and verify the response
        # echoes the text/type and matches the stored model object.
        comment_text = '`This` is a **test**'

        url, mimetype, data, objs = \
            self.setup_basic_post_test(self.user, False, None, True)
        data['text'] = comment_text
        data['text_type'] = text_type

        rsp = self.api_post(url, data, expected_mimetype=mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn(self.resource.item_result_key, rsp)

        comment_rsp = rsp[self.resource.item_result_key]
        self.assertEqual(comment_rsp['text'], comment_text)
        self.assertEqual(comment_rsp['text_type'], text_type)

        comment = self.resource.model.objects.get(pk=comment_rsp['id'])
        self.compare_item(comment_rsp, comment)
class BaseCommentItemMixin(object):
    # Shared GET/PUT tests for comment item resources.  The docstrings
    # below are consumed by @webapi_test_template (which substitutes
    # "<URL>"), so they must not be reworded.
    def compare_item(self, item_rsp, comment):
        # Baseline response-vs-model comparison; subclasses may extend.
        self.assertEqual(item_rsp['id'], comment.pk)
        self.assertEqual(item_rsp['text'], comment.text)

        if comment.rich_text:
            self.assertEqual(item_rsp['rich_text'], 'markdown')
        else:
            self.assertEqual(item_rsp['rich_text'], 'plain')

    @webapi_test_template
    def test_get_with_markdown_and_force_text_type_markdown(self):
        """Testing the GET <URL> API with text_type=markdown and
        ?force-text-type=markdown
        """
        self._test_get_with_force_text_type(
            text=r'\# `This` is a **test**',
            rich_text=True,
            force_text_type='markdown',
            expected_text=r'\# `This` is a **test**')

    @webapi_test_template
    def test_get_with_markdown_and_force_text_type_plain(self):
        """Testing the GET <URL> API with text_type=markdown and
        ?force-text-type=plain
        """
        self._test_get_with_force_text_type(
            text=r'\# `This` is a **test**',
            rich_text=True,
            force_text_type='plain',
            expected_text='# `This` is a **test**')

    @webapi_test_template
    def test_get_with_markdown_and_force_text_type_html(self):
        """Testing the GET <URL> API with text_type=markdown and
        ?force-text-type=html
        """
        self._test_get_with_force_text_type(
            text=r'\# `This` is a **test**',
            rich_text=True,
            force_text_type='html',
            expected_text='<p># <code>This</code> is a '
                          '<strong>test</strong></p>')

    @webapi_test_template
    def test_get_with_plain_and_force_text_type_markdown(self):
        """Testing the GET <URL> API with text_type=plain and
        ?force-text-type=markdown
        """
        self._test_get_with_force_text_type(
            text='#<`This` is a **test**>',
            rich_text=False,
            force_text_type='markdown',
            expected_text=r'\#<\`This\` is a \*\*test\*\*>')

    @webapi_test_template
    def test_get_with_plain_and_force_text_type_plain(self):
        """Testing the GET <URL> API with text_type=plain and
        ?force-text-type=plain
        """
        self._test_get_with_force_text_type(
            text='#<`This` is a **test**>',
            rich_text=False,
            force_text_type='plain',
            expected_text='#<`This` is a **test**>')

    @webapi_test_template
    def test_get_with_plain_and_force_text_type_html(self):
        """Testing the GET <URL> API with text_type=plain and
        ?force-text-type=html
        """
        self._test_get_with_force_text_type(
            text='#<`This` is a **test**>',
            rich_text=False,
            force_text_type='html',
            expected_text='#<`This` is a **test**>')

    @webapi_test_template
    def test_put_with_text_type_markdown_and_text(self):
        """Testing the PUT <URL> API
        with text_type=markdown and text specified
        """
        self._test_put_with_text_type_and_text('markdown')

    @webapi_test_template
    def test_put_with_text_type_plain_and_text(self):
        """Testing the PUT <URL> API with text_type=plain and text specified"""
        self._test_put_with_text_type_and_text('plain')

    @webapi_test_template
    def test_put_with_text_type_markdown_and_not_text(self):
        """Testing the PUT <URL> API
        with text_type=markdown and text not specified escapes text
        """
        self._test_put_with_text_type_and_not_text(
            'markdown',
            '`Test` **diff** comment',
            r'\`Test\` \*\*diff\*\* comment')

    @webapi_test_template
    def test_put_with_text_type_plain_and_not_text(self):
        """Testing the PUT <URL> API
        with text_type=plain and text not specified
        """
        self._test_put_with_text_type_and_not_text(
            'plain',
            r'\`Test\` \*\*diff\*\* comment',
            '`Test` **diff** comment')

    @webapi_test_template
    def test_put_without_text_type_and_escaping_provided_fields(self):
        """Testing the PUT <URL> API
        without changing text_type and with escaping provided fields
        """
        url, mimetype, data, reply_comment, objs = \
            self.setup_basic_put_test(self.user, False, None, True)

        reply_comment.rich_text = True
        reply_comment.save()

        if 'text_type' in data:
            del data['text_type']

        data.update({
            'text': '`This` is **text**',
        })

        rsp = self.api_put(url, data, expected_mimetype=mimetype)
        self.assertEqual(rsp['stat'], 'ok')

        comment_rsp = rsp[self.resource.item_result_key]
        self.assertEqual(comment_rsp['text_type'], 'markdown')
        self.assertEqual(comment_rsp['text'], '\\`This\\` is \\*\\*text\\*\\*')

        comment = self.resource.model.objects.get(pk=comment_rsp['id'])
        self.compare_item(comment_rsp, comment)

    @webapi_test_template
    def test_put_with_multiple_include_text_types(self):
        """Testing the PUT <URL> API with multiple include-text-types"""
        url, mimetype, data, reply_comment, objs = \
            self.setup_basic_put_test(self.user, False, None, True)

        data.update({
            'include_text_types': 'raw,plain,markdown,html',
            'text': 'Foo',
        })

        rsp = self.api_put(url, data, expected_mimetype=mimetype)
        self.assertEqual(rsp['stat'], 'ok')

    @webapi_test_template
    def test_put_with_issue_verification_success(self):
        """Testing the PUT <URL> API with issue verification success"""
        url, mimetype, data, comment, objs = \
            self.setup_basic_put_test(self.user, False, None, True)

        comment.require_verification = True
        comment.save()

        rsp = self.api_put(
            url,
            {'issue_status': 'resolved'},
            expected_mimetype=mimetype)
        self.assertEqual(rsp['stat'], 'ok')

    @webapi_test_template
    def test_put_with_issue_verification_permission_denied(self):
        """Testing the PUT <URL> API with issue verification permission denied
        """
        # A different user than the comment owner must not be able to
        # resolve an issue that requires verification.
        user = User.objects.get(username='doc')
        self.assertNotEqual(user, self.user)

        url, mimetype, data, comment, objs = \
            self.setup_basic_put_test(user, False, None, True)

        comment.require_verification = True
        comment.save()

        rsp = self.api_put(
            url,
            {'issue_status': 'resolved'},
            expected_status=self.not_owner_status_code)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], self.not_owner_error.code)

    def _test_get_with_force_text_type(self, text, rich_text,
                                       force_text_type, expected_text):
        # GET the comment with ?force-text-type and verify the converted
        # text; then re-fetch with include-text-types=raw and verify the
        # original text is surfaced in raw_text_fields.
        url, mimetype, comment = \
            self.setup_basic_get_test(self.user, False, None)

        comment.text = text
        comment.rich_text = rich_text
        comment.save()

        rsp = self.api_get(url + '?force-text-type=%s' % force_text_type,
                           expected_mimetype=mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn(self.resource.item_result_key, rsp)

        comment_rsp = rsp[self.resource.item_result_key]
        self.assertEqual(comment_rsp['text_type'], force_text_type)
        self.assertEqual(comment_rsp['text'], expected_text)
        self.assertNotIn('raw_text_fields', comment_rsp)

        rsp = self.api_get('%s?force-text-type=%s&include-text-types=raw'
                           % (url, force_text_type),
                           expected_mimetype=mimetype)
        comment_rsp = rsp[self.resource.item_result_key]
        self.assertIn('raw_text_fields', comment_rsp)
        self.assertEqual(comment_rsp['raw_text_fields']['text'], text)

    def _test_put_with_text_type_and_text(self, text_type):
        # PUT both text_type and text; response must echo them and match
        # the stored model object.
        comment_text = '`Test` **diff** comment'

        url, mimetype, data, reply_comment, objs = \
            self.setup_basic_put_test(self.user, False, None, True)
        data['text_type'] = text_type
        data['text'] = comment_text

        rsp = self.api_put(url, data, expected_mimetype=mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn(self.resource.item_result_key, rsp)

        comment_rsp = rsp[self.resource.item_result_key]
        self.assertEqual(comment_rsp['text'], comment_text)
        self.assertEqual(comment_rsp['text_type'], text_type)

        comment = self.resource.model.objects.get(pk=comment_rsp['id'])
        self.compare_item(comment_rsp, comment)

    def _test_put_with_text_type_and_not_text(self, text_type, text,
                                              expected_text):
        # PUT only text_type (no text): the stored text must be converted
        # (escaped/unescaped) to match the new type.
        self.assertIn(text_type, ('markdown', 'plain'))

        rich_text = (text_type == 'markdown')

        url, mimetype, data, reply_comment, objs = \
            self.setup_basic_put_test(self.user, False, None, True)

        reply_comment.text = text
        reply_comment.rich_text = not rich_text
        reply_comment.save()

        data['text_type'] = text_type

        if 'text' in data:
            del data['text']

        rsp = self.api_put(url, data, expected_mimetype=mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn(self.resource.item_result_key, rsp)

        comment_rsp = rsp[self.resource.item_result_key]
        self.assertEqual(comment_rsp['text'], expected_text)
        self.assertEqual(comment_rsp['text_type'], text_type)

        comment = self.resource.model.objects.get(pk=comment_rsp['id'])
        self.compare_item(comment_rsp, comment)
class CommentListMixin(ExtraDataListMixin, BaseCommentListMixin):
    """Combines the extra-data list tests with the base comment list tests."""
    pass
class CommentItemMixin(ExtraDataItemMixin, BaseCommentItemMixin):
    """Combines the extra-data item tests with the base comment item tests."""
    pass
class CommentReplyListMixin(BaseCommentListMixin):
    """Base comment list tests for reply resources (no extra-data tests)."""
    pass
class CommentReplyItemMixin(BaseCommentItemMixin):
    """Base comment item tests for reply resources (no extra-data tests)."""
    pass
| |
import re
import logging
import smtplib
import email.feedparser
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email import header
import tg
from paste.deploy.converters import asbool, asint
from formencode import validators as fev
from pylons import c
from allura.lib.utils import ConfigProxy
from allura.lib import exceptions as exc
from allura.lib import helpers as h
log = logging.getLogger(__name__)
RE_MESSAGE_ID = re.compile(r'<(.*)>')
config = ConfigProxy(
common_suffix='forgemail.domain',
return_path='forgemail.return_path')
EMAIL_VALIDATOR=fev.Email(not_empty=True)
def Header(text, *more_text):
    '''Helper to make sure we encode headers properly

    Accepts an existing email.header.Header (returned unchanged) or one or
    more unicode strings, which are appended into a new Header.
    Raises TypeError for non-unicode input (Python 2 semantics).
    '''
    if isinstance(text, header.Header):
        return text
    # email.header.Header handles str vs unicode differently
    # see http://docs.python.org/library/email.header.html#email.header.Header.append
    if type(text) != unicode:
        raise TypeError('This must be unicode: %r' % text)
    head = header.Header(text)
    for m in more_text:
        if type(m) != unicode:
            raise TypeError('This must be unicode: %r' % text)
        head.append(m)
    return head
def AddrHeader(fromaddr):
    '''Accepts any of:
        Header() instance
        foo@bar.com
        "Foo Bar" <foo@bar.com>

    Returns a Header suitable for an address field.  When the display name
    needs RFC 2047 encoding, the surrounding quotes are dropped since the
    encoded word no longer requires quoting.
    '''
    if isinstance(fromaddr, basestring) and ' <' in fromaddr:
        name, addr = fromaddr.rsplit(' <', 1)
        addr = '<' + addr # restore the char we just split off
        addrheader = Header(name, addr)
        if str(addrheader).startswith('=?'): # encoding escape chars
            # then quoting the name is no longer necessary
            name = name.strip('"')
            addrheader = Header(name, addr)
    else:
        addrheader = Header(fromaddr)
    return addrheader
def parse_address(addr):
    '''Resolve an incoming email address to (userpart, project, app).

    The domain must end with the configured forgemail suffix; the
    remaining labels are reversed into a project path with exactly one
    mount point.  Raises exc.AddressException when any step fails.
    '''
    userpart, domain = addr.split('@')
    # remove common domain suffix
    if not domain.endswith(config.common_suffix):
        raise exc.AddressException, 'Unknown domain: ' + domain
    domain = domain[:-len(config.common_suffix)]
    # e.g. "tool.project" -> "project/tool"
    path = '/'.join(reversed(domain.split('.')))
    project, mount_point = h.find_project('/' + path)
    if project is None:
        raise exc.AddressException, 'Unknown project: ' + domain
    if len(mount_point) != 1:
        raise exc.AddressException, 'Unknown tool: ' + domain
    with h.push_config(c, project=project):
        app = project.app_instance(mount_point[0])
        if not app:
            raise exc.AddressException, 'Unknown tool: ' + domain
    return userpart, project, app
def parse_message(data):
    '''Parse a raw email into a dict.

    Keys: multipart (bool), headers (dict), message_id, in_reply_to,
    references, and either parts (list of per-part dicts) or payload.
    Payloads are decoded to unicode when a charset is declared.
    '''
    # Parse the email to its constituent parts
    parser = email.feedparser.FeedParser()
    parser.feed(data)
    msg = parser.close()
    # Extract relevant data
    result = {}
    result['multipart'] = multipart = msg.is_multipart()
    result['headers'] = dict(msg)
    result['message_id'] = _parse_message_id(msg.get('Message-ID'))
    result['in_reply_to'] = _parse_message_id(msg.get('In-Reply-To'))
    result['references'] = _parse_message_id(msg.get('References'))
    # Fall back to a generated id when the message carries none.
    if result['message_id'] == []:
        result['message_id'] = h.gen_message_id()
    else:
        result['message_id'] = result['message_id'][0]
    if multipart:
        result['parts'] = []
        for part in msg.walk():
            # Each part carries the thread identifiers of the whole message.
            dpart = dict(
                headers=dict(part),
                message_id=result['message_id'],
                in_reply_to=result['in_reply_to'],
                references=result['references'],
                content_type=part.get_content_type(),
                filename=part.get_filename(None),
                payload=part.get_payload(decode=True))
            charset = part.get_content_charset()
            if charset:
                dpart['payload'] = dpart['payload'].decode(charset)
            result['parts'].append(dpart)
    else:
        result['payload'] = msg.get_payload(decode=True)
        charset = msg.get_content_charset()
        if charset:
            result['payload'] = result['payload'].decode(charset)
    return result
def identify_sender(peer, email_address, headers, msg):
    '''Map an incoming message to a User.

    Tries the envelope address first, then the From: header; falls back
    to the anonymous user.  `peer` and `msg` are accepted for interface
    compatibility but unused here.
    '''
    from allura import model as M
    # Dumb ID -- just look for email address claimed by a particular user
    addr = M.EmailAddress.query.get(_id=M.EmailAddress.canonical(email_address))
    if addr and addr.claimed_by_user_id:
        return addr.claimed_by_user()
    addr = M.EmailAddress.query.get(_id=M.EmailAddress.canonical(headers.get('From', '')))
    if addr and addr.claimed_by_user_id:
        return addr.claimed_by_user()
    return M.User.anonymous()
def encode_email_part(content, content_type):
    '''Build a MIMEText part from *content*, preferring us-ascii and
    falling back to utf-8 when the text cannot be ascii-encoded.'''
    try:
        return MIMEText(content.encode('ascii'), content_type, 'ascii')
    except UnicodeError:
        # BUG FIX: was a bare `except:`, which also swallowed unrelated
        # errors (e.g. AttributeError on a non-string).  Only the
        # ascii-encode failure is meant to trigger the utf-8 fallback.
        return MIMEText(content.encode('utf-8'), content_type, 'utf-8')
def make_multipart_message(*parts):
    '''Build a multipart/related message whose single child is a
    multipart/alternative section containing every given part.'''
    alternative = MIMEMultipart('alternative')
    for payload in parts:
        alternative.attach(payload)
    related = MIMEMultipart('related')
    related.preamble = 'This is a multi-part message in MIME format.'
    related.attach(alternative)
    return related
def _parse_message_id(msgid):
    '''Extract every angle-bracketed Message-ID from *msgid*.

    Returns an empty list when the header is absent (None).'''
    if msgid is None:
        return []
    return [match.group(1) for match in RE_MESSAGE_ID.finditer(msgid)]
def _parse_smtp_addr(addr):
    '''Coerce *addr* to a bare SMTP address.

    Prefers an angle-bracketed address if present, then any string
    containing '@'; otherwise falls back to the noreply address.
    '''
    addr = str(addr)
    addrs = _parse_message_id(addr)  # reuse <...> extraction
    if addrs and addrs[0]: return addrs[0]
    if '@' in addr: return addr
    return u'noreply@in.sf.net'
def isvalid(addr):
    '''return True if addr is a (possibly) valid email address, false
    otherwise'''
    try:
        EMAIL_VALIDATOR.to_python(addr, None)
        return True
    except fev.Invalid:
        return False
class SMTPClient(object):
    '''Lazily-connected SMTP sender.

    The connection is established (or re-established) on demand: the first
    sendmail() attempt fails (self._client is None), which triggers
    _connect() and a retry.
    '''

    def __init__(self):
        self._client = None  # created on first send via _connect()

    def sendmail(self, addrs, fromaddr, reply_to, subject, message_id, in_reply_to, message):
        if not addrs: return
        # We send one message with multiple envelope recipients, so use a generic To: addr
        # It might be nice to refactor to send one message per recipient, and use the actual To: addr
        message['To'] = Header(reply_to)
        message['From'] = AddrHeader(fromaddr)
        message['Reply-To'] = Header(reply_to)
        message['Subject'] = Header(subject)
        message['Message-ID'] = Header('<' + message_id + u'>')
        if in_reply_to:
            if not isinstance(in_reply_to, basestring):
                raise TypeError('Only strings are supported now, not lists')
            message['In-Reply-To'] = Header(u'<%s>' % in_reply_to)
        content = message.as_string()
        smtp_addrs = map(_parse_smtp_addr, addrs)
        smtp_addrs = [ a for a in smtp_addrs if isvalid(a) ]
        if not smtp_addrs:
            log.warning('No valid addrs in %s, so not sending mail',
                        map(unicode, addrs))
            return
        try:
            self._client.sendmail(
                config.return_path,
                smtp_addrs,
                content)
        except:
            # First use (or dropped connection): (re)connect and retry once.
            self._connect()
            self._client.sendmail(
                config.return_path,
                smtp_addrs,
                content)

    def _connect(self):
        # BUG FIX: the default ports were swapped -- SMTPS conventionally
        # uses 465 and plain SMTP uses 25, but the SSL branch defaulted to
        # 25 and the plain branch to 465.
        if asbool(tg.config.get('smtp_ssl', False)):
            smtp_client = smtplib.SMTP_SSL(
                tg.config.get('smtp_server', 'localhost'),
                asint(tg.config.get('smtp_port', 465)))
        else:
            smtp_client = smtplib.SMTP(
                tg.config.get('smtp_server', 'localhost'),
                asint(tg.config.get('smtp_port', 25)))
        # BUG FIX: STARTTLS must be negotiated *before* login, otherwise
        # credentials are sent over the unencrypted channel.
        if asbool(tg.config.get('smtp_tls', False)):
            smtp_client.starttls()
        if tg.config.get('smtp_user', None):
            smtp_client.login(tg.config['smtp_user'], tg.config['smtp_password'])
        self._client = smtp_client
| |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# Lightly modified from version of this script in incubator-parquet-format
from requests.auth import HTTPBasicAuth
import requests
import json
import os
import subprocess
import sys
import urllib2
import textwrap
# Repo root: this script lives two path components below IBIS_HOME.
IBIS_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
PROJECT_NAME = 'ibis'
print "IBIS_HOME = " + IBIS_HOME
# Remote name with the PR
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "origin")
# Remote name where results pushed
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "origin")
GITHUB_BASE = "https://github.com/cloudera/" + PROJECT_NAME + "/pull"
GITHUB_API_BASE = "https://api.github.com/repos/cloudera/" + PROJECT_NAME
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
# All git commands below run relative to the repo root.
os.chdir(IBIS_HOME)
# GITHUB_USER must be set in the environment; the password is prompted for
# interactively so it never appears in the environment or shell history.
GITHUB_USERNAME = os.environ['GITHUB_USER']
import getpass
GITHUB_PASSWORD = getpass.getpass('Enter github.com password for %s:'
                                  % GITHUB_USERNAME)
def get_json(url):
    """GET `url` with GitHub basic auth and return the decoded JSON body."""
    response = requests.get(
        url, auth=HTTPBasicAuth(GITHUB_USERNAME, GITHUB_PASSWORD))
    return response.json()
def fail(msg):
    # Print the error, restore the repo to its original state, then abort
    # the whole script with a nonzero exit code.
    print msg
    clean_up()
    sys.exit(-1)
def run_cmd(cmd):
    """Run a command and return its stdout.

    `cmd` may be a string (split on single spaces) or an argv list.
    Raises subprocess.CalledProcessError on a nonzero exit status.
    """
    # py2.6 does not have subprocess.check_output, so emulate it.
    if isinstance(cmd, basestring):
        cmd = cmd.split(' ')
    # FIX: removed the dead popenargs/kwargs indirection copied from the
    # check_output recipe -- kwargs was always empty, so cmd was always used.
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        raise subprocess.CalledProcessError(retcode, cmd, output=output)
    return output
def continue_maybe(prompt):
    """Ask a y/n question; abort the whole script unless the answer is 'y'."""
    answer = raw_input("\n%s (y/n): " % prompt)
    if answer.lower() != "y":
        fail("Okay, exiting")
# Record the current HEAD (short hash) so clean_up() can restore it later.
original_head = run_cmd("git rev-parse HEAD")[:8]
def clean_up():
print "Restoring head pointer to %s" % original_head
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print "Deleting local branch %s" % branch
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
    """Squash-merge PR `pr_num` into `target_ref` and push the result.

    Uses the module-level globals title, body and pr_repo_desc, which are
    assigned from the PR JSON in the script body below this function.
    Returns the short hash of the pushed merge commit.
    """
    # Work on throwaway branches so clean_up() can delete them afterwards.
    pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)
    had_conflicts = False
    try:
        run_cmd(['git', 'merge', pr_branch_name, '--squash'])
    except Exception as e:
        # Give the user a chance to resolve conflicts by hand; either
        # continue_maybe() aborts the script or we proceed with their fix.
        msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
        continue_maybe(msg)
        had_conflicts = True
    # Credit the most frequent commit author as the primary author.
    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                             '--pretty=format:%an <%ae>']).split("\n")
    distinct_authors = sorted(set(commit_authors),
                              key=lambda x: commit_authors.count(x), reverse=True)
    primary_author = distinct_authors[0]
    commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                      '--pretty=format:%h [%an] %s']).split("\n\n")
    # Build the commit message as a series of -m flags.
    merge_message_flags = []
    merge_message_flags += ["-m", title]
    if body != None:
        merge_message_flags += ["-m", '\n'.join(textwrap.wrap(body))]
    authors = "\n".join(["Author: %s" % a for a in distinct_authors])
    merge_message_flags += ["-m", authors]
    if had_conflicts:
        # Record who resolved the conflicts in the commit message.
        committer_name = run_cmd("git config --get user.name").strip()
        committer_email = run_cmd("git config --get user.email").strip()
        message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
            committer_name, committer_email)
        merge_message_flags += ["-m", message]
    # The string "Closes #%s" string is required for GitHub to correctly close the PR
    merge_message_flags += [
        "-m",
        "Closes #%s from %s and squashes the following commits:" % (pr_num, pr_repo_desc)]
    for c in commits:
        merge_message_flags += ["-m", c]
    run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
    continue_maybe("Merge complete (local ref %s). Push to %s?" % (
        target_branch_name, PUSH_REMOTE_NAME))
    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)
    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()
    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
    """Cherry-pick `merge_hash` onto a user-chosen branch and push it.

    Prompts for the branch (defaulting to `default_branch`) and returns the
    name of the branch that received the pick.
    """
    target = raw_input("Enter a branch name [%s]: " % default_branch)
    if not target:
        target = default_branch
    work_branch = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target.upper())
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target, work_branch))
    run_cmd("git checkout %s" % work_branch)
    run_cmd("git cherry-pick -sx %s" % merge_hash)
    continue_maybe("Pick complete (local ref %s). Push to %s?" % (
        work_branch, PUSH_REMOTE_NAME))
    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, work_branch, target))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)
    pick_hash = run_cmd("git rev-parse %s" % work_branch)[:8]
    clean_up()
    print("Pull request #%s picked into %s!" % (pr_num, target))
    print("Pick hash: %s" % pick_hash)
    return target
def fix_version_from_branch(branch, versions):
    """Pick the fix version corresponding to `branch`.

    Note: assumes `versions` is a sorted (newest->oldest) list of
    un-released versions; non-master entries must expose a .name attribute.
    """
    if branch == "master":
        return versions[0]
    branch_ver = branch.replace("branch-", "")
    matching = filter(lambda v: v.name.startswith(branch_ver), versions)
    return matching[-1]
branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
# latest_branch = sorted(branch_names, reverse=True)[0]
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["merged"] is True:
print "Pull request %s has already been merged, assuming you want to backport" % pr_num
merge_commit_desc = run_cmd([
'git', 'log', '--merges', '--first-parent',
'--grep=pull request #%s' % pr_num, '--oneline']).split("\n")[0]
if merge_commit_desc == "":
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
merge_hash = merge_commit_desc[:7]
message = merge_commit_desc[8:]
print "Found: %s" % message
maybe_cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]
| |
#!/usr/bin/env python3
import argparse
from pyparsing import Group, Word, Literal, Optional, \
alphanums, alphas, nums, restOfLine, \
Empty, Suppress
def bnf():
    """Build the pyparsing grammar for the toy assembly source language.

    A line is one of: an assignment command, a label, a goto (optionally
    conditional), or empty; "#" comments are ignored everywhere.
    """
    comment = "#" + restOfLine
    variable_name = Word(alphas, alphanums+"_")
    literal = Word(nums)
    operator = Word("+-|&", max=1)
    comparsion = Literal("!=") | "<=" | "<" | ">=" | ">" | "=="
    # "@name" / "@123": references; the "@" is dropped, the address kept.
    variable_reference = Suppress("@") + variable_name("address")
    literal_reference = Suppress("@") + literal("address")
    value = Group(variable_reference
                  | variable_name
                  | literal_reference
                  | literal)
    # A binary operation, a unary negation, or a bare value.
    operation = ((value("lhs") + operator("op") + value("rhs"))
                 | Group("-" + value("negative_val"))
                 | value("val"))
    command = value("name") + Suppress("=") + operation("value")
    # "goto X [if <expr> <cmp> 0 [else Y]]"
    goto = (Suppress("goto") + variable_name("target")
            + Optional(Suppress("if")
                       + operation("value") + comparsion("cmp")
                       + Suppress("0")
                       + Optional(Suppress("else")
                                  + variable_name("elsetgt"))))
    label = value("anchor") + Suppress(":")
    return (command | label | goto | Empty()).ignore(comment)
def asm(string, *args, **kwargs):
    """Normalize an inline assembly template and fill in its placeholders.

    Each non-empty line is stripped of surrounding whitespace, the lines
    are re-joined, str.format is applied, and the result is trimmed.
    """
    stripped = [chunk.strip() for chunk in string.split("\n") if chunk]
    return "\n".join(stripped).format(*args, **kwargs).strip()
def getvar(value, name=None):
val = value.address or value[0]
isnum = type(val) is str and val.isdecimal()
kind = "A" if isnum or value.address else "M"
prefix = name + "_" if name else ""
return {prefix + "address": val, prefix + "kind": kind}
def process_command(stmt):
    """Emit Hack assembly for an assignment command (dest = expr).

    Handles three statement shapes: binary operation (lhs op rhs), unary
    negation, and plain copy / constant assignment.
    """
    destination = getvar(stmt.name)
    # Dereference indirect destinations (@addr) before storing.
    # FIX: was `destination["kind"] is "A"` -- identity comparison against a
    # str literal only worked because CPython interns short strings; use ==.
    indirection = "A=M" if destination["kind"] == "A" else ""
    if stmt.op:
        # D = lhs; D = rhs <op> D; then store D into the destination.
        variables = getvar(stmt.lhs, name="lhs")
        variables.update(getvar(stmt.rhs, name="rhs"))
        return asm("""
                   @{lhs_address}
                   D={lhs_kind}
                   @{rhs_address}
                   D={rhs_kind}{op}D
                   @{name}
                   {ind}
                   M=D
                   """,
                   ind=indirection,
                   op=stmt.op,
                   name=destination["address"],
                   **variables)
    elif stmt.value.negative_val:
        val = getvar(stmt.value.negative_val)
        if destination["address"] == val["address"]:
            # Negating in place: no need to stage through D.
            return asm("""
                       @{name}
                       {ind}
                       M=-{kind}
                       """, ind=indirection, name=destination["address"], **val)
        else:
            return asm("""
                       @{address}
                       D=-{kind}
                       @{name}
                       {ind}
                       M=D
                       """, ind=indirection, name=destination["address"], **val)
    else:
        val = getvar(stmt.value)
        if val["kind"] == "A" and val["address"] in ("1", "0", "-1"):
            # 0, 1 and -1 are directly expressible Hack constants.
            return asm("""
                       @{name}
                       {ind}
                       M={val}
                       """,
                       ind=indirection,
                       name=destination["address"],
                       val=val["address"])
        else:
            return asm("""
                       @{address}
                       D={kind}
                       @{name}
                       {ind}
                       M=D
                       """,
                       ind=indirection,
                       name=destination["address"],
                       **getvar(stmt.value))
    # NOTE: unreachable -- every branch above returns; kept for safety.
    return ""
def process_jump(stmt):
    """Emit Hack assembly for a goto statement.

    Unconditional gotos become a bare 0;JMP.  Conditional gotos compute the
    condition value into D and jump on the mapped comparison mnemonic.
    """
    if stmt.cmp:
        # Source comparison operator -> Hack jump mnemonic.
        operations = {
            "<": "JLT",
            ">": "JGT",
            "==": "JEQ",
            "<=": "JLE",
            ">=": "JGE",
            "!=": "JNE",
            "": "JMP"
        }
        # Fall-through jump for the "else" target.
        # NOTE(review): elsejmp is only emitted in the two-operand branch
        # below; the single-value branches silently drop any else target --
        # confirm whether that is intentional.
        elsejmp = asm("""
                      @{target}
                      0;JMP
                      """, target=stmt.elsetgt) if stmt.elsetgt else ""
        if stmt.val:
            var = getvar(stmt.val)
            return asm("""
                       @{address}
                       D={kind}
                       @{target}
                       D;{cmp}
                       """,
                       cmp=operations[stmt.cmp],
                       target=stmt.target,
                       **var)
        elif stmt.negative_val:
            # NOTE(review): the guard tests stmt.negative_val but the value
            # is read from stmt.value.negative_val -- presumably both resolve
            # to the same pyparsing results name; verify against bnf().
            var = getvar(stmt.value.negative_val)
            return asm("""
                       @{address}
                       D=-{kind}
                       @{target}
                       D;{cmp}
                       """,
                       cmp=operations[stmt.cmp],
                       target=stmt.target,
                       **var)
        else:
            # Binary comparison: D = lhs; D = rhs <op> D; jump on D.
            sides = getvar(stmt.lhs, name="lhs")
            sides.update(getvar(stmt.rhs, name="rhs"))
            return asm("""
                       @{lhs_address}
                       D={lhs_kind}
                       @{rhs_address}
                       D={rhs_kind}{op}D
                       @{target}
                       D;{cmp}
                       {elsejmp}
                       """,
                       cmp=operations[stmt.cmp],
                       target=stmt.target,
                       op=stmt.op,
                       elsejmp=elsejmp,
                       **sides)
    else:
        # Unconditional goto.
        return asm("""
                   @{target}
                   0;JMP
                   """, target=stmt.target)
def process_anchor(stmt):
    """Emit a Hack label declaration for a parsed anchor (``name:``)."""
    return asm("({anchor})", anchor=stmt.anchor[0])
def translate_asm(parser, line):
    """Translate one source line into Hack assembly, echoing the original
    line as a // comment above the generated code."""
    stmt = parser.parseString(line)
    if stmt.name:
        emitted = process_command(stmt)
    elif stmt.target:
        emitted = process_jump(stmt)
    elif stmt.anchor:
        emitted = process_anchor(stmt)
    else:
        # Pure comment (or blank) line: re-emit it as a Hack // comment.
        return "//" + line[1:].strip() if len(line) >= 2 else ""
    return "//{line}\n{new}\n".format(line=line.strip(), new=emitted)
def main():
    """CLI entry point: translate the given source file to Hack assembly."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("input", help="file to run")
    options = arg_parser.parse_args()
    grammar = bnf()
    with open(options.input) as source:
        emitted = [translate_asm(grammar, line) for line in source if line]
    print("\n".join(chunk for chunk in emitted if chunk))
# Run the translator only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| |
from collections import deque
import cv2
import os
import gin
import gym
import numpy as np
import time
import pybullet_envs
from tasks import atari_wrappers
from tasks.base_task import BaseTask
from tasks.cartpole_env import CartPoleSwingUpHarderEnv
class RLTask(BaseTask):
    """Base class for RL tasks: owns a gym-style env and runs rollouts.

    Subclasses customize behavior via the modify_* hooks, which all default
    to identity transforms.
    """

    def __init__(self, v=True):
        # Concrete subclasses are expected to assign self.env.
        self.env = None
        self.render = False
        self.step_cnt = 0
        self.eval_mode = False
        self.verbose = v

    def reset_for_rollout(self):
        # Per-episode bookkeeping; subclasses extend this.
        self.step_cnt = 0

    def seed(self, seed=None):
        return self.env.seed(seed)

    def modify_obs(self, obs):
        # Hook: transform observations before the policy sees them.
        return obs

    def modify_action(self, act):
        # Hook: transform policy actions before the env executes them.
        return act

    def modify_reward(self, reward, done):
        # Hook: reshape the per-step reward.
        return reward

    def modify_done(self, reward, done):
        # Hook: override episode termination.
        return done

    def show_gui(self):
        if self.render and hasattr(self.env, 'render'):
            return self.env.render()

    def close(self):
        self.env.close()

    def rollout(self, solution, evaluation=False):
        """Run one episode with `solution`; return the total episode reward."""
        self.eval_mode = evaluation
        self.reset_for_rollout()
        solution.reset()
        if hasattr(self, 'register_solution'):
            self.register_solution(solution)
        t_start = time.time()
        observation = self.modify_obs(self.env.reset())
        self.show_gui()
        total_reward = 0
        finished = False
        while not finished:
            act = self.modify_action(solution.get_action(observation))
            observation, step_reward, finished, _ = self.env.step(act)
            observation = self.modify_obs(observation)
            # Reward is reshaped first; the (possibly reshaped) reward is
            # then passed to the termination hook, matching the hook order.
            step_reward = self.modify_reward(step_reward, finished)
            finished = self.modify_done(step_reward, finished)
            self.step_cnt += 1
            total_reward += step_reward
            self.show_gui()
        time_cost = time.time() - t_start
        if self.verbose:
            print('Rollout time={0:.2f}s, steps={1}, reward={2:.2f}'.format(
                time_cost, self.step_cnt, total_reward))
        return total_reward
@gin.configurable
class PyBulletTask(RLTask):
    """RL task wrapping a PyBullet gym environment.

    Can shuffle the order of observation dimensions on every rollout
    (shuffle_on_reset) for permutation-invariance experiments.
    """
    def __init__(self, env_name, shuffle_on_reset=False, render=False, v=True):
        super(PyBulletTask, self).__init__(v=v)
        self.env_name = env_name
        self.shuffle_on_reset = shuffle_on_reset
        self.perm_ix = 0
        self.render = render
        self.env = gym.make(self.env_name)
        # Identity permutation over observation dimensions by default.
        self.perm_ix = np.arange(self.env.observation_space.shape[0])
        if self.render:
            self.env.render('human')
    def reset_for_rollout(self):
        # Re-draw the observation permutation at the start of each episode.
        self.perm_ix = np.arange(self.env.observation_space.shape[0])
        if self.shuffle_on_reset:
            np.random.shuffle(self.perm_ix)
        if self.verbose:
            print('perm_ix: {}'.format(self.perm_ix))
        return super(PyBulletTask, self).reset_for_rollout()
    def modify_reward(self, reward, done):
        if self.eval_mode:
            return reward
        else:
            # Training reward: sum of the env's reward components after the
            # first, clipped at 0.  NOTE(review): assumes a PyBullet
            # locomotion env exposing a .rewards list -- confirm per env.
            return max(0, sum(self.env.rewards[1:]))
    def modify_obs(self, obs):
        # Apply the (possibly shuffled) dimension permutation.
        return obs[self.perm_ix]
    def show_gui(self):
        if self.render:
            # Slow rendering down so it is watchable.
            time.sleep(0.01)
        return super(PyBulletTask, self).show_gui()
@gin.configurable
class CartPoleSwingUpTask(RLTask):
    """Car-pole swing up task."""
    def __init__(self,
                 shuffle_on_reset=False,
                 render=False,
                 v=True,
                 num_noise_channels=0):
        super(CartPoleSwingUpTask, self).__init__(v=v)
        self.shuffle_on_reset = shuffle_on_reset
        self.perm_ix = 0
        self.render = render
        self.env = CartPoleSwingUpHarderEnv()
        # Identity permutation over observation dimensions by default.
        self.perm_ix = np.arange(self.env.observation_space.shape[0])
        # Optional Gaussian noise channels appended to each observation.
        self.noise_std = 0.1
        self.num_noise_channels = num_noise_channels
        # Task-local RNG so shuffling/noise are reproducible via seed().
        self.rnd = np.random.RandomState(seed=0)
    def seed(self, seed=None):
        self.rnd = np.random.RandomState(seed=seed)
        return super(CartPoleSwingUpTask, self).seed(seed)
    def reset_for_rollout(self):
        # Re-draw the observation permutation at the start of each episode.
        self.perm_ix = np.arange(self.env.observation_space.shape[0])
        if self.shuffle_on_reset:
            self.rnd.shuffle(self.perm_ix)
        if self.verbose:
            print('perm_ix: {}'.format(self.perm_ix))
        return super(CartPoleSwingUpTask, self).reset_for_rollout()
    def modify_obs(self, obs):
        # Permute dimensions, then append noise channels if configured.
        obs = obs[self.perm_ix]
        if self.num_noise_channels > 0:
            noise_obs = self.rnd.randn(self.num_noise_channels) * self.noise_std
            obs = np.concatenate([obs, noise_obs], axis=0)
        return obs
@gin.configurable
class CarRacingTask(RLTask):
    """Gym CarRacing-v0 task.

    Supports: replacing the green background with an image (bkg), shuffling
    square observation patches (permute_obs), stacking k grayscale frames
    (stack_k_frames), and ending episodes after too many consecutive
    negative rewards (out_of_track_cap).
    """
    def __init__(self,
                 bkg=None,
                 permute_obs=False,
                 patch_size=6,
                 out_of_track_cap=20,
                 stack_k_frames=0,
                 render=False):
        super(CarRacingTask, self).__init__()
        self.permute_obs = permute_obs
        self.patch_size = patch_size
        self.bkg = bkg
        # Load the named background image (bkg/<name>.jpg) if it exists;
        # cv2 loads BGR, so [..., ::-1] converts to RGB.
        bkg_file = os.path.join(
            os.path.dirname(__file__), 'bkg/{}.jpg'.format(self.bkg))
        if os.path.exists(bkg_file):
            self.bkg = cv2.resize(cv2.imread(bkg_file), (96, 96))[:, :, ::-1]
        else:
            self.bkg = None
        self.original_obs = None
        self.shuffled_obs = None
        # Identity permutation over the grid of patches by default.
        self.obs_perm_ix = np.arange((96 // self.patch_size)**2)
        self.rnd = np.random.RandomState(seed=0)
        self.solution = None
        self.render = render
        self._max_steps = 1000
        self._neg_reward_cnt = 0
        self._neg_reward_cap = out_of_track_cap
        # Policy actions in [-1, 1] are rescaled into this action box.
        self._action_high = np.array([1., 1., 1.])
        self._action_low = np.array([-1., 0., 0.])
        self.env = gym.make('CarRacing-v0')
        self.stack_k_frames = stack_k_frames
        if self.stack_k_frames > 0:
            self.obs_stack = deque(maxlen=self.stack_k_frames)
    def seed(self, seed=None):
        self.rnd = np.random.RandomState(seed=seed)
        return super(CarRacingTask, self).seed(seed)
    def modify_action(self, act):
        # Affine map from [-1, 1] to [_action_low, _action_high].
        return (act * (self._action_high - self._action_low) / 2. +
                (self._action_high + self._action_low) / 2.)
    def reset_for_rollout(self):
        self.original_obs = None
        self.shuffled_obs = None
        # Re-draw the patch permutation for the new episode.
        self.obs_perm_ix = np.arange((96 // self.patch_size)**2)
        if self.permute_obs:
            self.rnd.shuffle(self.obs_perm_ix)
        if self.stack_k_frames > 0:
            self.obs_stack = deque(maxlen=self.stack_k_frames)
        self._neg_reward_cnt = 0
        return super(CarRacingTask, self).reset_for_rollout()
    def modify_done(self, reward, done):
        # During training, also terminate after too many consecutive
        # negative-reward steps (off track) or too many steps overall.
        if self.eval_mode:
            return done
        if reward < 0:
            self._neg_reward_cnt += 1
        else:
            self._neg_reward_cnt = 0
        too_many_out_of_tracks = 0 < self._neg_reward_cap < self._neg_reward_cnt
        too_many_steps = 0 < self._max_steps <= self.step_cnt
        return done or too_many_out_of_tracks or too_many_steps
    def shuffle_obs_patches(self, obs):
        # Rearrange the 96x96 screen as a grid of patch_size x patch_size
        # tiles according to self.obs_perm_ix.
        shuffled_obs = np.zeros_like(obs)
        p_size = self.patch_size
        num_patches_per_dim = 96 // p_size
        for pstart_r in range(num_patches_per_dim):
            for pstart_c in range(num_patches_per_dim):
                ix = pstart_r * num_patches_per_dim + pstart_c
                shuffled_ix = self.obs_perm_ix[ix]
                spstart_r = shuffled_ix // num_patches_per_dim
                spstart_c = shuffled_ix % num_patches_per_dim
                shuffled_obs[
                    pstart_r * p_size:(pstart_r + 1) * p_size,
                    pstart_c * p_size:(pstart_c + 1) * p_size
                ] = obs[
                    spstart_r * p_size:(spstart_r + 1) * p_size,
                    spstart_c * p_size:(spstart_c + 1) * p_size
                ]
        return shuffled_obs
    def modify_obs(self, obs):
        if self.bkg is not None:
            # Replace the two green grass colors with the background image.
            mask = ((obs[:, :, 0] == 102) &
                    (obs[:, :, 1] == 204) &
                    (obs[:, :, 2] == 102))
            mask |= ((obs[:, :, 0] == 102) &
                     (obs[:, :, 1] == 230) &
                     (obs[:, :, 2] == 102))
            obs[:, :, 0][mask] = self.bkg[:, :, 0][mask]
            obs[:, :, 1][mask] = self.bkg[:, :, 1][mask]
            obs[:, :, 2][mask] = self.bkg[:, :, 2][mask]
        # Keep original and shuffled screens for visualization.
        self.original_obs = obs
        self.shuffled_obs = obs
        if self.permute_obs:
            self.shuffled_obs = self.shuffle_obs_patches(obs)
        if self.stack_k_frames > 0:
            gray_obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
            gray_obs[-12:] = 0  # Zero-out the indicator.
            if self.permute_obs:
                gray_obs = self.shuffle_obs_patches(gray_obs)
            # Pad the stack with the first frame so it is always full.
            while len(self.obs_stack) < self.stack_k_frames:
                self.obs_stack.append(gray_obs)
            self.obs_stack.append(gray_obs)
            obs = np.stack(self.obs_stack)
            return obs
        else:
            return self.shuffled_obs
    def register_solution(self, solution):
        # Kept so show_gui() can visualize the solution's attended patches.
        self.solution = solution
    def plot_white_patches(self, img, white_patch_ix):
        # Blend a white square over each listed patch index (50/50 mix).
        white_patch = np.ones([self.patch_size, self.patch_size, 3]) * 255
        num_patches = 96 // self.patch_size
        for ix in white_patch_ix:
            row_ix = ix // num_patches
            col_ix = ix % num_patches
            row_ss = row_ix * self.patch_size
            col_ss = col_ix * self.patch_size
            row_ee = row_ss + self.patch_size
            col_ee = col_ss + self.patch_size
            img[row_ss:row_ee, col_ss:col_ee] = (
                0.5 * img[row_ss:row_ee, col_ss:col_ee] + 0.5 * white_patch)
        return img.astype(np.uint8)
    def show_gui(self):
        # Render original and shuffled screens side by side, highlighting
        # the patches the solution attended to (if it exposes them).
        if self.render:
            if hasattr(self.solution, 'attended_patch_ix'):
                attended_patch_ix = self.solution.attended_patch_ix
            else:
                attended_patch_ix = None
            obs = self.shuffled_obs.copy()
            if attended_patch_ix is not None:
                obs = self.plot_white_patches(
                    img=obs, white_patch_ix=attended_patch_ix)
            org_obs = self.original_obs.copy()
            if attended_patch_ix is not None:
                # Map shuffled patch indices back to original positions.
                org_obs = self.plot_white_patches(
                    img=org_obs,
                    white_patch_ix=[self.obs_perm_ix[i]
                                    for i in attended_patch_ix])
            img = np.concatenate([org_obs, obs], axis=1)
            img = cv2.resize(img, (800, 400))[:, :, ::-1]
            cv2.imshow('render', img)
            cv2.waitKey(1)
        return super(CarRacingTask, self).show_gui()
@gin.configurable
class PuzzlePongTask(RLTask):
    """Atari Pong."""
    def __init__(self,
                 permute_obs=False,
                 patch_size=6,
                 occlusion_ratio=0.,
                 render=False):
        super(PuzzlePongTask, self).__init__()
        self.render = render
        self.occlusion_ratio = occlusion_ratio
        # Patch permutation / occlusion is handled inside the wrappers.
        self.env = atari_wrappers.wrap_deepmind(
            env=atari_wrappers.make_atari(env_id='PongNoFrameskip-v4'),
            episode_life=False,
            clip_rewards=False,
            flicker=False,
            frame_stack=True,
            permute_obs=permute_obs,
            patch_size=patch_size,
            rand_zero_out_ratio=occlusion_ratio,
        )
    def modify_obs(self, obs):
        # Convert from LazyFrames to numpy array.
        obs = np.array(obs)
        # Uncomment to confirm the env is indeed passing shuffled obs.
        # cv2.imshow('Pong debug', cv2.resize(obs[0], (200, 200)))
        # cv2.waitKey(1)
        if 0. < self.occlusion_ratio < 1.:
            # With occlusion, also report which patches remain visible.
            return {'obs': obs, 'patches_to_use': self.env.patch_to_keep_ix}
        else:
            return obs
| |
import os
import re
from pathlib import Path
import pytest
from django.urls import reverse
from rdmo.core.constants import VALUE_TYPE_FILE
from ..models import Project, Value
# (username, password) pairs parametrizing every view test; the anonymous
# user has no password and is expected to be redirected to the login page.
users = (
    ('owner', 'owner'),
    ('manager', 'manager'),
    ('author', 'author'),
    ('guest', 'guest'),
    ('user', 'user'),
    ('site', 'site'),
    ('anonymous', None),
)
# Role name -> ids of fixture projects that role may change.
# NOTE(review): not referenced in this part of the file; presumably used by
# sibling tests -- confirm before removing.
change_project_permission_map = {
    'owner': [1, 2, 3, 4, 5],
    'manager': [1, 3, 5],
    'api': [1, 2, 3, 4, 5],
    'site': [1, 2, 3, 4, 5]
}
# Ids of the fixture projects.
projects = [1, 2, 3, 4, 5]
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_get(db, client, username, password):
    """GET on the import view redirects: to /projects/ when authenticated,
    otherwise to the login page."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_create_import'))
    if not password:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')
    else:
        assert response.status_code == 302
        assert response.url == '/projects/'
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_error(db, settings, client, username, password):
    """POSTing an unknown import method yields a 400 (or a login redirect)."""
    client.login(username=username, password=password)
    response = client.post(reverse('project_create_import'), {'method': 'wrong'})
    if not password:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')
    else:
        assert response.status_code == 400
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_upload_file(db, settings, client, username, password):
    """Uploading a valid project XML shows the import confirmation page."""
    client.login(username=username, password=password)
    url = reverse('project_create_import')
    xml_path = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
    with open(xml_path, encoding='utf8') as upload:
        response = client.post(url, {
            'method': 'upload_file',
            'uploaded_file': upload
        })
    if not password:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')
    else:
        assert response.status_code == 200
        assert b'Create project from project.xml' in response.content
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_upload_file_error(db, settings, client, username, password):
    """Uploading a non-importable file is rejected with a 400 error."""
    client.login(username=username, password=password)
    url = reverse('project_create_import')
    xml_path = os.path.join(settings.BASE_DIR, 'xml', 'error.xml')
    with open(xml_path, encoding='utf8') as upload:
        response = client.post(url, {
            'method': 'upload_file',
            'uploaded_file': upload
        })
    if not password:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')
    else:
        assert response.status_code == 400
        assert b'Files of this type cannot be imported.' in response.content
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_upload_file_empty(db, client, username, password):
    """POSTing upload_file without an actual file is rejected with a 400."""
    client.login(username=username, password=password)
    response = client.post(reverse('project_create_import'), {'method': 'upload_file'})
    if not password:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')
    else:
        assert response.status_code == 400
        assert b'There has been an error with your import.' in response.content
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_import_file(db, settings, client, files, username, password):
    # End-to-end import: upload project.xml, then confirm the import with
    # every checkbox from the confirmation page switched on.
    client.login(username=username, password=password)
    projects_count = Project.objects.count()
    # upload file
    url = reverse('project_create_import')
    xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
    with open(xml_file, encoding='utf8') as f:
        response = client.post(url, {
            'method': 'upload_file',
            'uploaded_file': f
        })
    if password:
        assert response.status_code == 200
        # get keys from the response
        keys = re.findall(r'name=\"(.*?)\"', response.content.decode())
        # import file
        data = {key: ['on'] for key in keys}
        data.update({'method': 'import_file'})
        response = client.post(url, data)
        # check if all the files are where they are supposed to be
        for file_value in Value.objects.filter(value_type=VALUE_TYPE_FILE):
            assert Path(settings.MEDIA_ROOT).joinpath(file_value.file.name).exists()
        # assert that the project exists and that there are values
        # NOTE(review): this inner `if password` is always true here (we are
        # already inside the `if password` branch); the else arm is dead.
        if password:
            project = Project.objects.order_by('updated').last()
            assert response.status_code == 302
            assert response.url == '/projects/{}/'.format(project.pk)
            # a new project, with new values
            assert Project.objects.count() == projects_count + 1
            assert project.values.count() > 0
        else:
            assert response.status_code == 302
            assert response.url.startswith('/account/login/')
            # no new project was created
            assert Project.objects.count() == projects_count
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_empty(db, settings, client, username, password):
    # Upload project.xml, then confirm the import with no checkboxes
    # selected: a project is created but carries no values.
    client.login(username=username, password=password)
    projects_count = Project.objects.count()
    # upload file
    url = reverse('project_create_import')
    xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
    with open(xml_file, encoding='utf8') as f:
        response = client.post(url, {
            'method': 'upload_file',
            'uploaded_file': f
        })
    if password:
        assert response.status_code == 200
        response = client.post(url, {
            'method': 'import_file'
        })
        # check if all the files are where they are supposed to be
        for file_value in Value.objects.filter(value_type=VALUE_TYPE_FILE):
            assert Path(settings.MEDIA_ROOT).joinpath(file_value.file.name).exists()
        # assert that the project exists, but that there are no values
        # NOTE(review): this inner `if password` is always true here (we are
        # already inside the `if password` branch); the else arm is dead.
        if password:
            new_project = Project.objects.order_by('updated').last()
            assert response.status_code == 302
            assert response.url == '/projects/{}/'.format(new_project.id)
            # a new project, but no values
            assert Project.objects.count() == projects_count + 1
            assert new_project.values.count() == 0
        else:
            assert response.status_code == 302
            assert response.url.startswith('/account/login/')
            # no new project was created
            assert Project.objects.count() == projects_count
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_import_project(db, settings, client, username, password):
    """import_project without an active import session returns 400."""
    client.login(username=username, password=password)
    response = client.post(reverse('project_create_import'), {'method': 'import_project'})
    if not password:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')
    else:
        assert response.status_code == 400
| |
import unittest
import time
from binascii import hexlify, unhexlify
import ed25519
from ed25519 import _ed25519 as raw
def flip_bit(s, bit=0, in_byte=-1):
    """Return a copy of string `s` with one bit toggled.

    `bit` selects the bit position (0 = least significant) within the
    character at index `in_byte` (default: the last character).
    """
    chars = list(s)
    chars[in_byte] = chr(ord(chars[in_byte]) ^ (0x01 << bit))
    return "".join(chars)
# the pure-python demonstration code (on my 2010 MacBookPro) takes 5s to
# generate a public key, 9s to sign, 14s to verify
# the SUPERCOP-ref version we use takes 2ms for keygen, 2ms to sign, and 7ms
# to verify
class Basic(unittest.TestCase):
timer = None
    def log(self, msg):
        # Timing log helper, deliberately disabled: the early return below
        # skips all the reporting code.  Remove the `return` to re-enable.
        # NOTE: the dead body uses Python 2 print statements.
        return
        now = time.time()
        if self.timer is None:
            self.timer = now
        else:
            elapsed = now - self.timer
            self.timer = now
            print " (%f elapsed)" % elapsed
        print msg
def test_version(self):
# just make sure it can be retrieved
ver = ed25519.__version__
self.failUnless(isinstance(ver, type("")))
def test_constants(self):
# the secret key we get from raw.keypair() are 64 bytes long, and
# are mostly the output of a sha512 call. The first 32 bytes are the
# private exponent (random, with a few bits stomped).
self.failUnlessEqual(raw.SECRETKEYBYTES, 64)
# the public key is the encoded public point
self.failUnlessEqual(raw.PUBLICKEYBYTES, 32)
self.failUnlessEqual(raw.SIGNATUREKEYBYTES, 64)
def test_raw(self):
sk_s = "\x00" * 32 # usually urandom(32)
vk_s, skvk_s = raw.publickey(sk_s)
self.failUnlessEqual(len(vk_s), 32)
exp_vks = unhexlify("3b6a27bcceb6a42d62a3a8d02a6f0d73"
"653215771de243a63ac048a18b59da29")
self.failUnlessEqual(vk_s, exp_vks)
self.failUnlessEqual(skvk_s[:32], sk_s)
self.failUnlessEqual(skvk_s[32:], vk_s)
msg = "hello world"
msg_and_sig = raw.sign(msg, skvk_s)
sig = msg_and_sig[:-len(msg)]
self.failUnlessEqual(len(sig), 64)
exp_sig = unhexlify("b0b47780f096ae60bfff8d8e7b19c36b"
"321ae6e69cca972f2ff987ef30f20d29"
"774b53bae404485c4391ddf1b3f37aaa"
"8a9747f984eb0884e8aa533386e73305")
self.failUnlessEqual(sig, exp_sig)
ret = raw.open(sig+msg, vk_s) # don't raise exception
self.failUnlessEqual(ret, msg)
self.failUnlessRaises(raw.BadSignatureError,
raw.open,
sig+msg+".. NOT!", vk_s)
self.failUnlessRaises(raw.BadSignatureError,
raw.open,
sig+flip_bit(msg), vk_s)
self.failUnlessRaises(raw.BadSignatureError,
raw.open,
sig+msg, flip_bit(vk_s))
self.failUnlessRaises(raw.BadSignatureError,
raw.open,
sig+msg, flip_bit(vk_s, in_byte=2))
self.failUnlessRaises(raw.BadSignatureError,
raw.open,
flip_bit(sig)+msg, vk_s)
self.failUnlessRaises(raw.BadSignatureError,
raw.open,
flip_bit(sig, in_byte=33)+msg, vk_s)
def test_keypair(self):
sk, vk = ed25519.create_keypair()
self.failUnless(isinstance(sk, ed25519.SigningKey), sk)
self.failUnless(isinstance(vk, ed25519.VerifyingKey), vk)
sk2, vk2 = ed25519.create_keypair()
self.failIfEqual(hexlify(sk.to_bytes()), hexlify(sk2.to_bytes()))
# you can control the entropy source
def not_so_random(length):
return "4"*length
sk1, vk1 = ed25519.create_keypair(entropy=not_so_random)
self.failUnlessEqual(sk1.to_ascii(encoding="base64"),
"NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ")
self.failUnlessEqual(vk1.to_ascii(encoding="base64"),
"6yzxO/euOl9hQWih+wknLTl3HsS4UjcngV5GbK+O4WM")
sk2, vk2 = ed25519.create_keypair(entropy=not_so_random)
self.failUnlessEqual(sk1.to_ascii(encoding="base64"),
sk2.to_ascii(encoding="base64"))
self.failUnlessEqual(vk1.to_ascii(encoding="base64"),
vk2.to_ascii(encoding="base64"))
def test_publickey(self):
seed = unhexlify("4ba96b0b5303328c7405220598a587c4"
"acb06ed9a9601d149f85400195f1ec3d")
sk = ed25519.SigningKey(seed)
self.failUnlessEqual(hexlify(sk.to_bytes()),
("4ba96b0b5303328c7405220598a587c4"
"acb06ed9a9601d149f85400195f1ec3d"
"a66d161e090652b054740748f059f92a"
"5b731f1c27b05571f6d942e4f8b7b264"))
self.failUnlessEqual(hexlify(sk.to_seed()),
("4ba96b0b5303328c7405220598a587c4"
"acb06ed9a9601d149f85400195f1ec3d"))
self.failUnlessRaises(ValueError,
ed25519.SigningKey, "wrong length")
sk2 = ed25519.SigningKey(seed)
self.failUnlessEqual(sk, sk2)
def test_OOP(self):
sk_s = unhexlify("4ba96b0b5303328c7405220598a587c4"
"acb06ed9a9601d149f85400195f1ec3d"
"a66d161e090652b054740748f059f92a"
"5b731f1c27b05571f6d942e4f8b7b264")
sk = ed25519.SigningKey(sk_s)
self.failUnlessEqual(len(sk.to_bytes()), 64)
self.failUnlessEqual(sk.to_bytes(), sk_s)
sk2_seed = unhexlify("4ba96b0b5303328c7405220598a587c4"
"acb06ed9a9601d149f85400195f1ec3d")
sk2 = ed25519.SigningKey(sk2_seed)
self.failUnlessEqual(sk2.to_bytes(), sk.to_bytes())
vk = sk.get_verifying_key()
self.failUnlessEqual(len(vk.to_bytes()), 32)
exp_vks = unhexlify("a66d161e090652b054740748f059f92a"
"5b731f1c27b05571f6d942e4f8b7b264")
self.failUnlessEqual(vk.to_bytes(), exp_vks)
self.failUnlessEqual(ed25519.VerifyingKey(vk.to_bytes()), vk)
msg = "hello world"
sig = sk.sign(msg)
self.failUnlessEqual(len(sig), 64)
exp_sig = unhexlify("6eaffe94f2972b35158b6aaa9b69c1da"
"97f0896aca29c41b1dd7b32e6c9e2ff6"
"76fc8d8b034709cdcc37d8aeb86bebfb"
"173ace3c319e211ea1d7e8d8884c1808")
self.failUnlessEqual(sig, exp_sig)
self.failUnlessEqual(vk.verify(sig, msg), None) # also, don't throw
self.failUnlessRaises(ed25519.BadSignatureError,
vk.verify, sig, msg+".. NOT!")
def test_object_identity(self):
    """Equality compares key material, not object identity, and never
    matches objects of unrelated types.

    Uses assertEqual/assertNotEqual instead of the deprecated
    failUnlessEqual/failIfEqual aliases (removed in Python 3.12).
    """
    sk1_s = unhexlify("ef32972ae3f1252a5aa1395347ea008c"
                      "bd2fed0773a4ea45e2d2d06c8cf8fbd4"
                      "c024601a9c5b854fb100ff3116cf4f22"
                      "a311565f027391cb49d3bbe11c44399d")
    sk2_s = unhexlify("3d550c158900b4c2922b6656d2f80572"
                      "89de4ee65043745179685ae7d29b944d"
                      "672b8a2cb23f9e75e1d46ce249cd9c04"
                      "68f816f1c734a102822b60e18b41eacd")
    sk1a = ed25519.SigningKey(sk1_s)
    sk1b = ed25519.SigningKey(sk1_s)
    vk1a = sk1a.get_verifying_key()
    vk1b = sk1b.get_verifying_key()
    sk2 = ed25519.SigningKey(sk2_s)
    vk2 = sk2.get_verifying_key()
    # Distinct objects built from identical bytes compare equal.
    self.assertEqual(sk1a, sk1b)
    self.assertNotEqual(sk1a, sk2)
    self.assertEqual(vk1a, vk1b)
    self.assertNotEqual(vk1a, vk2)
    # Comparisons against foreign types must not blow up or match.
    self.assertNotEqual(sk2, "not a SigningKey")
    self.assertNotEqual(vk2, "not a VerifyingKey")
def test_prefix(self):
    """to_bytes()/to_seed()/sign() can prepend an ASCII prefix, and the
    matching constructors/verify() must require and strip it.

    Uses assertTrue/assertEqual/assertRaises instead of the deprecated
    failUnless* aliases (removed in Python 3.12).
    """
    sk1,vk1 = ed25519.create_keypair()
    PREFIX = "private0-"
    p = sk1.to_bytes(PREFIX)
    # that gives us a binary string with a prefix
    self.assertTrue(p.startswith(PREFIX), repr(p))
    sk2 = ed25519.SigningKey(p, prefix=PREFIX)
    self.assertEqual(sk1, sk2)
    self.assertEqual(repr(sk1.to_bytes()), repr(sk2.to_bytes()))
    # A mismatched prefix must be rejected, not silently ignored.
    self.assertRaises(ed25519.BadPrefixError,
                      ed25519.SigningKey, p, prefix="WRONG-")
    # SigningKey.to_seed() can do a prefix too
    p = sk1.to_seed(PREFIX)
    self.assertTrue(p.startswith(PREFIX), repr(p))
    sk3 = ed25519.SigningKey(p, prefix=PREFIX)
    self.assertEqual(sk1, sk3)
    self.assertEqual(repr(sk1.to_bytes()), repr(sk3.to_bytes()))
    self.assertRaises(ed25519.BadPrefixError,
                      ed25519.SigningKey, p, prefix="WRONG-")
    # verifying keys can do this too
    PREFIX = "public0-"
    p = vk1.to_bytes(PREFIX)
    self.assertTrue(p.startswith(PREFIX), repr(p))
    vk2 = ed25519.VerifyingKey(p, prefix=PREFIX)
    self.assertEqual(vk1, vk2)
    self.assertEqual(repr(vk1.to_bytes()), repr(vk2.to_bytes()))
    self.assertRaises(ed25519.BadPrefixError,
                      ed25519.VerifyingKey, p, prefix="WRONG-")
    # and signatures
    PREFIX = "sig0-"
    p = sk1.sign("msg", PREFIX)
    self.assertTrue(p.startswith(PREFIX), repr(p))
    vk1.verify(p, "msg", PREFIX)
    self.assertRaises(ed25519.BadPrefixError,
                      vk1.verify, p, "msg", prefix="WRONG-")
def test_ascii(self):
    """to_ascii/from_ascii must round-trip for every supported encoding,
    input length 0..99, with and without a prefix.

    Uses assertEqual instead of the deprecated failUnlessEqual alias
    (removed in Python 3.12).
    """
    b2a = ed25519.to_ascii
    a2b = ed25519.from_ascii
    for prefix in ("", "prefix-"):
        for length in range(0, 100):
            b1 = "a"*length
            for base in ("base64", "base32", "base16", "hex"):
                a = b2a(b1, prefix, base)
                b2 = a2b(a, prefix, base)
                self.assertEqual(b1, b2)
def test_encoding(self):
    """Fixed-vector checks for the ASCII serialization of signing keys,
    verifying keys, and signatures in each supported encoding.

    Uses assertEqual instead of the deprecated failUnlessEqual alias
    (removed in Python 3.12).
    """
    sk_s = "\x88" * 32 # usually urandom(32)
    sk1 = ed25519.SigningKey(sk_s)
    vk1 = sk1.get_verifying_key()

    def check1(encoding, expected):
        # SigningKey -> ascii -> SigningKey round-trip against a vector.
        PREFIX = "private0-"
        p = sk1.to_ascii(PREFIX, encoding)
        self.assertEqual(p, expected)
        sk2 = ed25519.SigningKey(p, prefix=PREFIX, encoding=encoding)
        self.assertEqual(repr(sk1.to_bytes()), repr(sk2.to_bytes()))
        self.assertEqual(sk1, sk2)
    check1("base64", "private0-iIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIg")
    check1("base32", "private0-rceirceirceirceirceirceirceirceirceirceirceirceircea")
    check1("hex", "private0-8888888888888888888888888888888888888888888888888888888888888888")

    def check2(encoding, expected):
        # VerifyingKey round-trip.
        PREFIX="public0-"
        p = vk1.to_ascii(PREFIX, encoding)
        self.assertEqual(p, expected)
        vk2 = ed25519.VerifyingKey(p, prefix=PREFIX, encoding=encoding)
        self.assertEqual(repr(vk1.to_bytes()), repr(vk2.to_bytes()))
        self.assertEqual(vk1, vk2)
    check2("base64", "public0-skkdlQKuKGMKK6yy4MdFEP/N0yjDNP8+E5PnWy0x59w")
    check2("base32", "public0-wjer3ficvyuggcrlvszobr2fcd743uziym2p6pqtsptvwljr47oa")
    check2("hex", "public0-b2491d9502ae28630a2bacb2e0c74510ffcdd328c334ff3e1393e75b2d31e7dc")

    def check3(encoding, expected):
        # Signature generation and verification with an encoding.
        msg = "msg"
        PREFIX="sig0-"
        sig = sk1.sign(msg, PREFIX, encoding)
        self.assertEqual(sig, expected)
        vk1.verify(sig, msg, PREFIX, encoding)
    check3("base64", "sig0-MNfdUir6tMlaYQ+/p8KANJ5d+bk8g2al76v5MeJCo6RiywxURda3sU580CyiW2FBG/Q7kDRswgYqxbkQw3o5CQ")
    check3("base32", "sig0-gdl52urk7k2mswtbb672pquagspf36nzhsbwnjppvp4tdyscuosgfsymkrc5nn5rjz6nalfclnqucg7uhoidi3gcayvmloiqyn5dsci")
    check3("hex", "sig0-30d7dd522afab4c95a610fbfa7c280349e5df9b93c8366a5efabf931e242a3a462cb0c5445d6b7b14e7cd02ca25b61411bf43b90346cc2062ac5b910c37a3909")
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from cntk import output_variable, FreeDimension
from cntk.ops.functions import UserFunction
import numpy as np
import yaml
from utils.rpn.generate_anchors import generate_anchors
from utils.rpn.bbox_transform import bbox_transform_inv, clip_boxes
from utils.nms.nms_wrapper import nms
# Prefer a project-local config module; fall back to the bundled defaults.
try:
    from config import cfg
except ImportError:
    from utils.default_config import cfg

# Set to True to print anchor/shape/im_info tracing from ProposalLayer.
DEBUG = False
class ProposalLayer(UserFunction):
    '''
    Outputs object detection proposals by applying estimated bounding-box
    transformations to a set of regular boxes (called "anchors").

    Inputs are the RPN outputs (scores, bbox_deltas) plus im_info; the single
    output "rpn_rois_raw" holds proposals as (x1, y1, x2, y2) rectangles.
    '''

    def __init__(self, arg1, arg2, arg3, name='ProposalLayer', param_str=None):
        """Wrap the three RPN inputs and parse the YAML parameter string.

        :param arg1: scores input (bg/fg class probabilities per anchor).
        :param arg2: bbox_deltas input (per-anchor box regression targets).
        :param arg3: im_info input (image/padding size information).
        :param param_str: YAML string with 'feat_stride' and 'scales'.
        '''
        """
        super(ProposalLayer, self).__init__([arg1, arg2, arg3], name=name)
        self.param_str_ = param_str if param_str is not None else "'feat_stride': 16\n'scales':\n - 8 \n - 16 \n - 32"

        # parse the layer parameter string, which must be valid YAML.
        # FIX: use yaml.safe_load instead of yaml.load -- the parameters are
        # plain scalars/lists, and yaml.load() can instantiate arbitrary
        # Python objects from a crafted string (deprecated without an explicit
        # Loader since PyYAML 5.1).
        layer_params = yaml.safe_load(self.param_str_)
        self._feat_stride = layer_params['feat_stride']
        anchor_scales = layer_params.get('scales', (8, 16, 32))
        self._anchors = generate_anchors(scales=np.array(anchor_scales))
        self._num_anchors = self._anchors.shape[0]

        if DEBUG:
            print ('feat_stride: {}'.format(self._feat_stride))
            print ('anchors:')
            print (self._anchors)

    def infer_outputs(self):
        """Declare the single output variable holding the proposals."""
        # rois blob: holds R regions of interest, each is a 5-tuple
        # (n, x1, y1, x2, y2) specifying an image batch index n and a
        # rectangle (x1, y1, x2, y2)
        # for CNTK the proposal shape is [4 x roisPerImage], and mirrored in Python
        proposalShape = (FreeDimension, 4)

        return [output_variable(proposalShape, self.inputs[0].dtype, self.inputs[0].dynamic_axes,
                                name="rpn_rois_raw", needs_gradient=False)]

    def forward(self, arguments, device=None, outputs_to_retain=None):
        """Compute object proposals from RPN scores and bbox deltas.

        :param arguments: (scores, bbox_deltas, im_info) numpy inputs.
        :param outputs_to_retain: empty during evaluation; used here to pick
            train vs. test NMS configuration.
        :returns: (state, proposals) -- state is None (no gradient).
        """
        # Algorithm:
        #
        # for each (H, W) location i
        #   generate A anchor boxes centered on cell i
        #   apply predicted bbox deltas at cell i to each of the A anchors
        # clip predicted boxes to image
        # remove predicted boxes with either height or width < threshold
        # sort all (proposal, score) pairs by score from highest to lowest
        # take top pre_nms_topN proposals before NMS
        # apply NMS with threshold 0.7 to remaining proposals
        # take after_nms_topN proposals after NMS
        # return the top proposals (-> RoIs top, scores top)

        # use potentially different number of proposals for training vs evaluation
        if len(outputs_to_retain) == 0:
            # print("EVAL")
            pre_nms_topN = cfg["TEST"].RPN_PRE_NMS_TOP_N
            post_nms_topN = cfg["TEST"].RPN_POST_NMS_TOP_N
            nms_thresh = cfg["TEST"].RPN_NMS_THRESH
            min_size = cfg["TEST"].RPN_MIN_SIZE
        else:
            pre_nms_topN = cfg["TRAIN"].RPN_PRE_NMS_TOP_N
            post_nms_topN = cfg["TRAIN"].RPN_POST_NMS_TOP_N
            nms_thresh = cfg["TRAIN"].RPN_NMS_THRESH
            min_size = cfg["TRAIN"].RPN_MIN_SIZE

        bottom = arguments
        assert bottom[0].shape[0] == 1, \
            'Only single item batches are supported'

        # the first set of _num_anchors channels are bg probs
        # the second set are the fg probs, which we want
        scores = bottom[0][:, self._num_anchors:, :, :]
        bbox_deltas = bottom[1]
        im_info = bottom[2][0]

        if DEBUG:
            # im_info = (pad_width, pad_height, scaled_image_width, scaled_image_height, orig_img_width, orig_img_height)
            # e.g.(1000, 1000, 1000, 600, 500, 300) for an original image of 600x300 that is scaled and padded to 1000x1000
            print ('im_size: ({}, {})'.format(im_info[0], im_info[1]))
            print ('scaled im_size: ({}, {})'.format(im_info[2], im_info[3]))
            print ('original im_size: ({}, {})'.format(im_info[4], im_info[5]))

        # 1. Generate proposals from bbox deltas and shifted anchors
        height, width = scores.shape[-2:]

        if DEBUG:
            print ('score map size: {}'.format(scores.shape))

        # Enumerate all shifts
        shift_x = np.arange(0, width) * self._feat_stride
        shift_y = np.arange(0, height) * self._feat_stride
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel())).transpose()

        # Enumerate all shifted anchors:
        #
        # add A anchors (1, A, 4) to
        # cell K shifts (K, 1, 4) to get
        # shift anchors (K, A, 4)
        # reshape to (K*A, 4) shifted anchors
        A = self._num_anchors
        K = shifts.shape[0]
        anchors = self._anchors.reshape((1, A, 4)) + \
                  shifts.reshape((1, K, 4)).transpose((1, 0, 2))
        anchors = anchors.reshape((K * A, 4))

        # Transpose and reshape predicted bbox transformations to get them
        # into the same order as the anchors:
        #
        # bbox deltas will be (1, 4 * A, H, W) format
        # transpose to (1, H, W, 4 * A)
        # reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
        # in slowest to fastest order
        bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))

        # Same story for the scores:
        #
        # scores are (1, A, H, W) format
        # transpose to (1, H, W, A)
        # reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
        scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))

        # Convert anchors into proposals via bbox transformations
        proposals = bbox_transform_inv(anchors, bbox_deltas)

        # 2. clip predicted boxes to image
        proposals = clip_boxes(proposals, im_info)

        # 3. remove predicted boxes with either height or width < threshold
        # (NOTE: convert min_size to input image scale. Original size = im_info[4:6], scaled size = im_info[2:4])
        cntk_image_scale = im_info[2] / im_info[4]
        keep = _filter_boxes(proposals, min_size * cntk_image_scale)
        proposals = proposals[keep, :]
        scores = scores[keep]

        # 4. sort all (proposal, score) pairs by score from highest to lowest
        # 5. take top pre_nms_topN (e.g. 6000)
        order = scores.ravel().argsort()[::-1]
        if pre_nms_topN > 0:
            order = order[:pre_nms_topN]
        proposals = proposals[order, :]
        scores = scores[order]

        # 6. apply nms (e.g. threshold = 0.7)
        # 7. take after_nms_topN (e.g. 300)
        # 8. return the top proposals (-> RoIs top)
        keep = nms(np.hstack((proposals, scores)), nms_thresh)
        if post_nms_topN > 0:
            keep = keep[:post_nms_topN]
        proposals = proposals[keep, :]
        scores = scores[keep]

        # pad with zeros if too few rois were found
        num_found_proposals = proposals.shape[0]
        if num_found_proposals < post_nms_topN:
            if DEBUG:
                print("Only {} proposals generated in ProposalLayer".format(num_found_proposals))
            proposals_padded = np.zeros(((post_nms_topN,) + proposals.shape[1:]), dtype=np.float32)
            proposals_padded[:num_found_proposals, :] = proposals
            proposals = proposals_padded

        # Output rois blob
        # Our RPN implementation only supports a single input image, so all
        # batch inds are 0
        # for CNTK: add batch axis to output shape
        proposals.shape = (1,) + proposals.shape

        return None, proposals

    def backward(self, state, root_gradients, variables):
        """This layer does not propagate gradients."""
        pass

    def clone(self, cloned_inputs):
        """Recreate the layer over cloned inputs, preserving parameters."""
        return ProposalLayer(cloned_inputs[0], cloned_inputs[1], cloned_inputs[2], param_str=self.param_str_)

    def serialize(self):
        """Persist only the YAML parameter string; anchors are re-derived."""
        internal_state = {}
        internal_state['param_str'] = self.param_str_
        return internal_state

    @staticmethod
    def deserialize(inputs, name, state):
        """Rebuild a ProposalLayer from serialize()'s state dict."""
        param_str = state['param_str']
        return ProposalLayer(inputs[0], inputs[1], inputs[2], name=name, param_str=param_str)
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
if np.isnan(ws[0]):
print('NaN NaN NaN NaN')
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
| |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.common import config as neutron_cfg
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron import quota
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
from quark import allocation_pool
from quark.db import api as db_api
from quark.db import models
from quark import exceptions as q_exc
from quark import network_strategy
from quark.plugin_modules import ip_policies
from quark.plugin_modules import routes
from quark import plugin_views as v
from quark import quota_driver as qdv
from quark import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
STRATEGY = network_strategy.STRATEGY

quark_subnet_opts = [
    cfg.BoolOpt('allow_allocation_pool_update',
                default=False,
                # FIX: the implicit string concatenation was missing a space,
                # rendering the help text as "allocation_poolupdates".
                help=_('Controls whether or not to allow allocation_pool '
                       'updates'))
]

CONF.register_opts(quark_subnet_opts, "QUARK")
# Instantiate the configured IPAM driver once at import time.
ipam_driver = (importutils.import_class(CONF.QUARK.ipam_driver))()
def _validate_subnet_cidr(context, network_id, new_subnet_cidr):
    """Reject *new_subnet_cidr* when it overlaps a sibling subnet.

    Checks the requested CIDR against every non-shared subnet already on
    the network. A no-op when overlapping IPs are globally allowed.

    :raises InvalidInput: on the first overlap found (without disclosing
        the overlapping subnet's details to the caller).
    """
    if neutron_cfg.cfg.CONF.allow_overlapping_ips:
        return

    candidate = netaddr.IPSet([new_subnet_cidr])
    # Using admin context here, in case we actually share networks later
    existing = db_api.subnet_find(context.elevated(), None, None, None,
                                  False, network_id=network_id,
                                  shared=[False])
    for other in existing:
        if not (netaddr.IPSet([other.cidr]) & candidate):
            continue
        # don't give out details of the overlapping subnet
        err_msg = (_("Requested subnet with cidr: %(cidr)s for "
                     "network: %(network_id)s overlaps with another "
                     "subnet") %
                   {'cidr': new_subnet_cidr,
                    'network_id': network_id})
        LOG.error(_("Validation for CIDR: %(new_cidr)s failed - "
                    "overlaps with subnet %(subnet_id)s "
                    "(CIDR: %(cidr)s)"),
                  {'new_cidr': new_subnet_cidr,
                   'subnet_id': other.id,
                   'cidr': other.cidr})
        raise exceptions.InvalidInput(error_message=err_msg)
def create_subnet(context, subnet):
    """Create a subnet.

    Create a subnet which represents a range of IP addresses
    that can be allocated to devices

    : param context: neutron api request context
    : param subnet: dictionary describing the subnet, with keys
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.  All keys will be populated.
    """
    LOG.info("create_subnet for tenant %s" % context.tenant_id)
    net_id = subnet["subnet"]["network_id"]

    # Everything up to the notification runs in one DB transaction.
    with context.session.begin():
        net = db_api.network_find(context, None, None, None, False,
                                  id=net_id, scope=db_api.ONE)
        if not net:
            raise exceptions.NetworkNotFound(net_id=net_id)

        sub_attrs = subnet["subnet"]

        # Strip attributes callers may never set, and admin-only ones for
        # non-admin callers, before the body reaches the DB layer.
        always_pop = ["enable_dhcp", "ip_version", "first_ip", "last_ip",
                      "_cidr"]
        admin_only = ["segment_id", "do_not_use", "created_at",
                      "next_auto_assign_ip"]
        utils.filter_body(context, sub_attrs, admin_only, always_pop)

        _validate_subnet_cidr(context, net_id, sub_attrs["cidr"])

        cidr = netaddr.IPNetwork(sub_attrs["cidr"])

        # Reject prefixes too small to be useful (v6 > /64, v4 > /30).
        err_vals = {'cidr': sub_attrs["cidr"], 'network_id': net_id}
        err = _("Requested subnet with cidr: %(cidr)s for "
                "network: %(network_id)s. Prefix is too small, must be a "
                "larger subnet. A prefix less than /%(prefix)s is required.")

        if cidr.version == 6 and cidr.prefixlen > 64:
            err_vals["prefix"] = 65
            err_msg = err % err_vals
            raise exceptions.InvalidInput(error_message=err_msg)
        elif cidr.version == 4 and cidr.prefixlen > 30:
            err_vals["prefix"] = 31
            err_msg = err % err_vals
            raise exceptions.InvalidInput(error_message=err_msg)

        # Enforce subnet quotas
        net_subnets = get_subnets(context,
                                  filters=dict(network_id=net_id))

        # Per-network subnet quotas apply to non-admins only, split by
        # IP version of the requested CIDR.
        if not context.is_admin:
            v4_count, v6_count = 0, 0
            for subnet in net_subnets:
                if netaddr.IPNetwork(subnet['cidr']).version == 6:
                    v6_count += 1
                else:
                    v4_count += 1

            if cidr.version == 6:
                tenant_quota_v6 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v6_subnets_per_network').first()
                if tenant_quota_v6 != -1:
                    quota.QUOTAS.limit_check(
                        context, context.tenant_id,
                        v6_subnets_per_network=v6_count + 1)
            else:
                tenant_quota_v4 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v4_subnets_per_network').first()
                if tenant_quota_v4 != -1:
                    quota.QUOTAS.limit_check(
                        context, context.tenant_id,
                        v4_subnets_per_network=v4_count + 1)

        # See RM981. The default behavior of setting a gateway unless
        # explicitly asked to not is no longer desirable.
        gateway_ip = utils.pop_param(sub_attrs, "gateway_ip")
        dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", [])
        host_routes = utils.pop_param(sub_attrs, "host_routes", [])
        allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None)
        sub_attrs["network"] = net

        new_subnet = db_api.subnet_create(context, **sub_attrs)

        cidrs = []
        alloc_pools = allocation_pool.AllocationPools(sub_attrs["cidr"],
                                                      allocation_pools)
        # Only explicit pools (a list) translate into exclusion policies.
        if isinstance(allocation_pools, list):
            cidrs = alloc_pools.get_policy_cidrs()

        quota.QUOTAS.limit_check(
            context,
            context.tenant_id,
            alloc_pools_per_subnet=len(alloc_pools))

        ip_policies.ensure_default_policy(cidrs, [new_subnet])
        new_subnet["ip_policy"] = db_api.ip_policy_create(context,
                                                          exclude=cidrs)

        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 routes_per_subnet=len(host_routes))

        default_route = None
        for route in host_routes:
            netaddr_route = netaddr.IPNetwork(route["destination"])
            # A host route for 0.0.0.0/0 doubles as the gateway; only one
            # such route is allowed.
            if netaddr_route.value == routes.DEFAULT_ROUTE.value:
                if default_route:
                    raise q_exc.DuplicateRouteConflict(
                        subnet_id=new_subnet["id"])

                default_route = route
                gateway_ip = default_route["nexthop"]
                alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(db_api.route_create(
                context, cidr=route["destination"], gateway=route["nexthop"]))

        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 dns_nameservers_per_subnet=len(dns_ips))

        for dns_ip in dns_ips:
            new_subnet["dns_nameservers"].append(db_api.dns_create(
                context, ip=netaddr.IPAddress(dns_ip)))

        # if the gateway_ip is IN the cidr for the subnet and NOT excluded by
        # policies, we should raise a 409 conflict
        if gateway_ip and default_route is None:
            alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(db_api.route_create(
                context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip))

    subnet_dict = v._make_subnet_dict(new_subnet)
    subnet_dict["gateway_ip"] = gateway_ip

    # Emit the billing/notification event only after the transaction closes.
    n_rpc.get_notifier("network").info(
        context,
        "ip_block.create",
        dict(tenant_id=subnet_dict["tenant_id"],
             ip_block_id=subnet_dict["id"],
             created_at=new_subnet["created_at"]))

    return subnet_dict
def update_subnet(context, id, subnet):
    """Update values of a subnet.

    : param context: neutron api request context
    : param id: UUID representing the subnet to update.
    : param subnet: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.
    """
    LOG.info("update_subnet %s for tenant %s" %
             (id, context.tenant_id))

    with context.session.begin():
        subnet_db = db_api.subnet_find(context, None, None, None, False, id=id,
                                       scope=db_api.ONE)
        if not subnet_db:
            raise exceptions.SubnetNotFound(id=id)

        s = subnet["subnet"]
        # Immutable attributes are always stripped; admin-only ones are
        # stripped for non-admin callers.
        always_pop = ["_cidr", "cidr", "first_ip", "last_ip", "ip_version",
                      "segment_id", "network_id"]
        admin_only = ["do_not_use", "created_at", "tenant_id",
                      "next_auto_assign_ip", "enable_dhcp"]
        utils.filter_body(context, s, admin_only, always_pop)

        dns_ips = utils.pop_param(s, "dns_nameservers", [])
        host_routes = utils.pop_param(s, "host_routes", [])
        gateway_ip = utils.pop_param(s, "gateway_ip", None)
        allocation_pools = utils.pop_param(s, "allocation_pools", None)

        if not CONF.QUARK.allow_allocation_pool_update:
            # Pool updates disabled: reject explicit pools and rebuild the
            # pool view from the subnet's existing IP policies instead.
            if allocation_pools:
                raise exceptions.BadRequest(
                    resource="subnets",
                    msg="Allocation pools cannot be updated.")
            alloc_pools = allocation_pool.AllocationPools(
                subnet_db["cidr"],
                policies=models.IPPolicy.get_ip_policy_cidrs(subnet_db))
        else:
            alloc_pools = allocation_pool.AllocationPools(subnet_db["cidr"],
                                                          allocation_pools)
            quota.QUOTAS.limit_check(
                context,
                context.tenant_id,
                alloc_pools_per_subnet=len(alloc_pools))

        if gateway_ip:
            alloc_pools.validate_gateway_excluded(gateway_ip)

            # If the caller also supplies a 0.0.0.0/0 host route, that route
            # wins and the explicit gateway_ip is not written separately.
            default_route = None
            for route in host_routes:
                netaddr_route = netaddr.IPNetwork(route["destination"])
                if netaddr_route.value == routes.DEFAULT_ROUTE.value:
                    default_route = route
                    break

            if default_route is None:
                route_model = db_api.route_find(
                    context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id,
                    scope=db_api.ONE)
                if route_model:
                    db_api.route_update(context, route_model,
                                        gateway=gateway_ip)
                else:
                    db_api.route_create(context,
                                        cidr=str(routes.DEFAULT_ROUTE),
                                        gateway=gateway_ip, subnet_id=id)

        # DNS servers are replaced wholesale, not merged.
        if dns_ips:
            subnet_db["dns_nameservers"] = []
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 dns_nameservers_per_subnet=len(dns_ips))
        for dns_ip in dns_ips:
            subnet_db["dns_nameservers"].append(db_api.dns_create(
                context,
                ip=netaddr.IPAddress(dns_ip)))

        # Host routes are replaced wholesale as well.
        if host_routes:
            subnet_db["routes"] = []
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 routes_per_subnet=len(host_routes))
        for route in host_routes:
            subnet_db["routes"].append(db_api.route_create(
                context, cidr=route["destination"], gateway=route["nexthop"]))

        if CONF.QUARK.allow_allocation_pool_update:
            if isinstance(allocation_pools, list):
                cidrs = alloc_pools.get_policy_cidrs()
                ip_policies.ensure_default_policy(cidrs, [subnet_db])
                subnet_db["ip_policy"] = db_api.ip_policy_update(
                    context, subnet_db["ip_policy"], exclude=cidrs)
            # invalidate the cache
            db_api.subnet_update_set_alloc_pool_cache(context, subnet_db)

        subnet = db_api.subnet_update(context, subnet_db, **s)
    return v._make_subnet_dict(subnet)
def get_subnet(context, id, fields=None):
    """Retrieve a single subnet by UUID.

    : param context: neutron api request context
    : param id: UUID representing the subnet to fetch.
    : param fields: a list of strings that are valid keys in a
        subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    :raises SubnetNotFound: when no subnet matches *id*.
    """
    LOG.info("get_subnet %s for tenant %s with fields %s" %
             (id, context.tenant_id, fields))
    subnet_db = db_api.subnet_find(context, None, None, None, False, id=id,
                                   join_dns=True, join_routes=True,
                                   scope=db_api.ONE)
    if not subnet_db:
        raise exceptions.SubnetNotFound(subnet_id=id)

    # Lazily populate the allocation-pool cache on first read.
    if not subnet_db.get("_allocation_pool_cache"):
        db_api.subnet_update_set_alloc_pool_cache(
            context, subnet_db, subnet_db.allocation_pools)

    return v._make_subnet_dict(subnet_db)
def get_subnets(context, limit=None, page_reverse=False, sorts=None,
                marker=None, filters=None, fields=None):
    """List subnets visible to the caller.

    The contents of the list depend on the identity of the user making
    the request (as indicated by the context) as well as any filters.

    : param context: neutron api request context
    : param filters: dictionary of exact-match filters; keys are valid
        subnet keys from the RESOURCE_ATTRIBUTE_MAP in
        neutron/api/v2/attributes.py.
    : param fields: list of subnet-dictionary keys to include in each
        returned item.
    """
    LOG.info("get_subnets for tenant %s with filters %s fields %s" %
             (context.tenant_id, filters, fields))
    query_filters = filters or {}
    found = db_api.subnet_find(context, limit=limit,
                               page_reverse=page_reverse, sorts=sorts,
                               marker_obj=marker,
                               join_dns=True, join_routes=True,
                               **query_filters)

    # Warm the allocation-pool cache for any subnet that is missing it.
    for subnet_db in found:
        if subnet_db.get("_allocation_pool_cache"):
            continue
        db_api.subnet_update_set_alloc_pool_cache(
            context, subnet_db, subnet_db.allocation_pools)

    return v._make_subnets_list(found, fields=fields)
def get_subnets_count(context, filters=None):
    """Return the number of subnets.

    The result depends on the identity of the user making the request
    (as indicated by the context) as well as any filters.

    : param context: neutron api request context
    : param filters: a dictionary with keys that are valid keys for
        a network as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictiontary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.

    NOTE: this method is optional, as it was not part of the originally
          defined plugin API.
    """
    LOG.info("get_subnets_count for tenant %s with filters %s" %
             (context.tenant_id, filters))
    # FIX: filters defaults to None and was splatted directly, so calling
    # without filters raised TypeError. Normalize like get_subnets() does.
    filters = filters or {}
    return db_api.subnet_count_all(context, **filters)
def _delete_subnet(context, subnet):
    """Delete a subnet row, refusing while any of its IPs are allocated.

    :raises SubnetInUse: when the subnet still has allocated IPs.
    """
    if subnet.allocated_ips:
        raise exceptions.SubnetInUse(subnet_id=subnet["id"])
    db_api.subnet_delete(context, subnet)
def delete_subnet(context, id):
    """Delete a subnet and emit the corresponding notification event.

    : param context: neutron api request context
    : param id: UUID representing the subnet to delete.
    :raises SubnetNotFound: when no subnet matches *id*.
    """
    LOG.info("delete_subnet %s for tenant %s" % (id, context.tenant_id))
    with context.session.begin():
        subnet_db = db_api.subnet_find(context, id=id, scope=db_api.ONE)
        if not subnet_db:
            raise exceptions.SubnetNotFound(subnet_id=id)

        # Capture the notification payload before the row disappears.
        payload = {"tenant_id": subnet_db["tenant_id"],
                   "ip_block_id": subnet_db["id"],
                   "created_at": subnet_db["created_at"],
                   "deleted_at": timeutils.utcnow()}

        _delete_subnet(context, subnet_db)

    n_rpc.get_notifier("network").info(context, "ip_block.delete", payload)
def diagnose_subnet(context, id, fields):
    """Admin-only diagnostic view: one subnet by id, or all for id == "*"."""
    if not context.is_admin:
        raise exceptions.NotAuthorized()
    wants_all = (id == "*")
    subnets = (get_subnets(context, filters={}) if wants_all
               else get_subnet(context, id))
    return {'subnets': subnets}
| |
#!/usr/bin/env python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##\author Kevin Watts
from __future__ import with_statement
import roslib
#roslib.load_manifest('pr2_computer_monitor')
import rospy
import traceback
import threading
from threading import Timer
import sys, os, time
from time import sleep
import subprocess
import socket
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
##### monkey-patch to suppress threading error message in python 2.7.3
##### See http://stackoverflow.com/questions/13193278/understand-python-threading-bug
if sys.version_info[:3] == (2, 7, 3):
    import threading
    threading._DummyThread._Thread__stop = lambda x: 42
#####

# Disk-space thresholds; presumably GB free (check_disk_usage runs
# "df --block-size=1G") -- TODO confirm against the full usage check.
low_hd_level = 5
critical_hd_level = 1

# HD temperature thresholds in degrees C.
hd_temp_warn = 55 #3580, setting to 55C to after checking manual
hd_temp_error = 70 # Above this temperature, hard drives will have serious problems

# Numeric level -> human-readable status for the diagnostic messages.
stat_dict = { 0: 'OK', 1: 'Warning', 2: 'Error' }
temp_dict = { 0: 'OK', 1: 'Hot', 2: 'Critical Hot' }
usage_dict = { 0: 'OK', 1: 'Low Disk Space', 2: 'Very Low Disk Space' }

REMOVABLE = ['/dev/sda'] # Store removable drives so we can ignore if removed
## Connects to hddtemp daemon to get temp, HD make.
def get_hddtemp_data(hostname = 'localhost', port = 7634):
    """Query the hddtemp daemon and parse its drive report.

    :param hostname: host running hddtemp (default localhost).
    :param port: hddtemp TCP port (default 7634).
    :returns: (ok, drives, makes, temps); on failure ok is False and the
        lists carry the exception text, matching the original contract.
    """
    hd_sock = None
    try:
        hd_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        hd_sock.connect((hostname, port))
        sock_data = ''
        # hddtemp sends one report and closes; read until EOF.
        while True:
            newdat = hd_sock.recv(1024)
            if len(newdat) == 0:
                break
            sock_data = sock_data + newdat

        sock_vals = sock_data.split('|')

        # Format of output looks like ' | DRIVE | MAKE | TEMP | '
        idx = 0

        drives = []
        makes = []
        temps = []
        while idx + 5 < len(sock_vals):
            this_drive = sock_vals[idx + 1]
            this_make = sock_vals[idx + 2]
            this_temp = sock_vals[idx + 3]

            # Sometimes we get duplicate makes if hard drives are mounted
            # to two different points
            if this_make in makes:
                idx += 5
                continue

            drives.append(this_drive)
            makes.append(this_make)
            temps.append(this_temp)

            idx += 5

        return True, drives, makes, temps
    except Exception:
        # FIX: narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # propagate instead of being reported as a hddtemp failure.
        rospy.logerr(traceback.format_exc())
        return False, [ 'Exception' ], [ traceback.format_exc() ], [ 0 ]
    finally:
        # FIX: always release the socket; the original leaked it whenever an
        # exception fired before the explicit close().
        if hd_sock is not None:
            try:
                hd_sock.close()
            except socket.error:
                pass
def update_status_stale(stat, last_update_time):
    """Escalate *stat* in place when its data source has stopped updating.

    20-35s without an update marks it 'Lagging' (WARN); beyond 35s it is
    'Stale' (ERROR). The first two KeyValue entries are replaced with the
    staleness label and the elapsed time.
    """
    time_since_update = rospy.get_time() - last_update_time

    def _escalate(label, level):
        # Set or append the label in the message (once), then raise level.
        if stat.level == DiagnosticStatus.OK:
            stat.message = label
        elif stat.message.find(label) < 0:
            stat.message = ', '.join([stat.message, label])
        stat.level = max(stat.level, level)

    stale_status = 'OK'
    if 20 < time_since_update <= 35:
        stale_status = 'Lagging'
        _escalate(stale_status, DiagnosticStatus.WARN)
    if time_since_update > 35:
        stale_status = 'Stale'
        _escalate(stale_status, DiagnosticStatus.ERROR)

    # Replace the two leading status entries with fresh values.
    stat.values.pop(0)
    stat.values.pop(0)
    stat.values.insert(0, KeyValue(key = 'Update Status', value = stale_status))
    stat.values.insert(1, KeyValue(key = 'Time Since Update', value = str(time_since_update)))
class hd_monitor():
def __init__(self, hostname, diag_hostname, home_dir = ''):
    """Set up diagnostics state and start the periodic checks.

    :param hostname: machine hostname, used as the diagnostic hardware_id.
    :param diag_hostname: display name used in diagnostic status names.
    :param home_dir: directory to watch for disk usage; '' disables the
        disk-usage check entirely.
    """
    # Serializes access to timers/state shared between the check callbacks.
    self._mutex = threading.Lock()

    self._hostname = hostname
    self._no_temp_warn = rospy.get_param('~no_hd_temp_warn', False)
    if self._no_temp_warn:
        rospy.logwarn('Not warning for HD temperatures is deprecated. This will be removed in D-turtle')
    self._home_dir = home_dir

    self._diag_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=10)

    # Timestamps of the last successful checks/publish (rospy time).
    self._last_temp_time = 0
    self._last_usage_time = 0
    self._last_publish_time = 0

    # threading.Timer handles for the periodic re-checks.
    self._temp_timer = None
    self._usage_timer = None

    # Temperature status starts at ERROR/'No Data' until the first reading.
    self._temp_stat = DiagnosticStatus()
    self._temp_stat.name = "%s HD Temperature" % diag_hostname
    self._temp_stat.level = DiagnosticStatus.ERROR
    self._temp_stat.hardware_id = hostname
    self._temp_stat.message = 'No Data'
    self._temp_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data'),
                               KeyValue(key = 'Time Since Last Update', value = 'N/A') ]

    # Disk-usage monitoring only runs when a directory was supplied.
    if self._home_dir != '':
        self._usage_stat = DiagnosticStatus()
        self._usage_stat.level = DiagnosticStatus.ERROR
        self._usage_stat.hardware_id = hostname
        self._usage_stat.name = '%s HD Usage' % diag_hostname
        self._usage_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data' ),
                                    KeyValue(key = 'Time Since Last Update', value = 'N/A') ]
        self.check_disk_usage()

    # Kick off the first temperature check (it reschedules itself).
    self.check_temps()
## Must have the lock to cancel everything
def cancel_timers(self):
if self._temp_timer:
self._temp_timer.cancel()
self._temp_timer = None
if self._usage_timer:
self._usage_timer.cancel()
self._usage_timer = None
def check_temps(self):
if rospy.is_shutdown():
with self._mutex:
self.cancel_timers()
return
diag_strs = [ KeyValue(key = 'Update Status', value = 'OK' ) ,
KeyValue(key = 'Time Since Last Update', value = '0' ) ]
diag_level = DiagnosticStatus.OK
diag_message = 'OK'
temp_ok, drives, makes, temps = get_hddtemp_data()
for index in range(0, len(drives)):
temp = temps[index]
if not unicode(temp).isnumeric() and drives[index] not in REMOVABLE:
temp_level = DiagnosticStatus.ERROR
temp_ok = False
elif not unicode(temp).isnumeric() and drives[index] in REMOVABLE:
temp_level = DiagnosticStatus.OK
temp = "Removed"
else:
temp_level = DiagnosticStatus.OK
if float(temp) > hd_temp_warn:
temp_level = DiagnosticStatus.WARN
if float(temp) > hd_temp_error:
temp_level = DiagnosticStatus.ERROR
diag_level = max(diag_level, temp_level)
diag_strs.append(KeyValue(key = 'Disk %d Temp Status' % index, value = temp_dict[temp_level]))
diag_strs.append(KeyValue(key = 'Disk %d Mount Pt.' % index, value = drives[index]))
diag_strs.append(KeyValue(key = 'Disk %d Device ID' % index, value = makes[index]))
diag_strs.append(KeyValue(key = 'Disk %d Temp' % index, value = temp))
if not temp_ok:
diag_level = DiagnosticStatus.ERROR
with self._mutex:
self._last_temp_time = rospy.get_time()
self._temp_stat.values = diag_strs
self._temp_stat.level = diag_level
# Give No Data message if we have no reading
self._temp_stat.message = temp_dict[diag_level]
if not temp_ok:
self._temp_stat.message = 'Error'
if self._no_temp_warn and temp_ok:
self._temp_stat.level = DiagnosticStatus.OK
if not rospy.is_shutdown():
self._temp_timer = threading.Timer(10.0, self.check_temps)
self._temp_timer.start()
else:
self.cancel_timers()
def check_disk_usage(self):
if rospy.is_shutdown():
with self._mutex:
self.cancel_timers()
return
diag_vals = [ KeyValue(key = 'Update Status', value = 'OK' ),
KeyValue(key = 'Time Since Last Update', value = '0' ) ]
diag_level = DiagnosticStatus.OK
diag_message = 'OK'
try:
p = subprocess.Popen(["df", "-P", "--block-size=1G", self._home_dir],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
retcode = p.returncode
if (retcode == 0):
diag_vals.append(KeyValue(key = 'Disk Space Reading', value = 'OK'))
row_count = 0
for row in stdout.split('\n'):
if len(row.split()) < 2:
continue
if not unicode(row.split()[1]).isnumeric() or float(row.split()[1]) < 10: # Ignore small drives
continue
row_count += 1
g_available = row.split()[-3]
name = row.split()[0]
size = row.split()[1]
mount_pt = row.split()[-1]
if (float(g_available) > low_hd_level):
level = DiagnosticStatus.OK
elif (float(g_available) > critical_hd_level):
level = DiagnosticStatus.WARN
else:
level = DiagnosticStatus.ERROR
diag_vals.append(KeyValue(
key = 'Disk %d Name' % row_count, value = name))
diag_vals.append(KeyValue(
key = 'Disk %d Available' % row_count, value = g_available))
diag_vals.append(KeyValue(
key = 'Disk %d Size' % row_count, value = size))
diag_vals.append(KeyValue(
key = 'Disk %d Status' % row_count, value = stat_dict[level]))
diag_vals.append(KeyValue(
key = 'Disk %d Mount Point' % row_count, value = mount_pt))
diag_level = max(diag_level, level)
diag_message = usage_dict[diag_level]
else:
diag_vals.append(KeyValue(key = 'Disk Space Reading', value = 'Failed'))
diag_level = DiagnosticStatus.ERROR
diag_message = stat_dict[diag_level]
except:
rospy.logerr(traceback.format_exc())
diag_vals.append(KeyValue(key = 'Disk Space Reading', value = 'Exception'))
diag_vals.append(KeyValue(key = 'Disk Space Ex', value = traceback.format_exc()))
diag_level = DiagnosticStatus.ERROR
diag_message = stat_dict[diag_level]
# Update status
with self._mutex:
self._last_usage_time = rospy.get_time()
self._usage_stat.values = diag_vals
self._usage_stat.message = diag_message
self._usage_stat.level = diag_level
if not rospy.is_shutdown():
self._usage_timer = threading.Timer(5.0, self.check_disk_usage)
self._usage_timer.start()
else:
self.cancel_timers()
def publish_stats(self):
with self._mutex:
update_status_stale(self._temp_stat, self._last_temp_time)
msg = DiagnosticArray()
msg.header.stamp = rospy.get_rostime()
msg.status.append(self._temp_stat)
if self._home_dir != '':
update_status_stale(self._usage_stat, self._last_usage_time)
msg.status.append(self._usage_stat)
if rospy.get_time() - self._last_publish_time > 0.5:
self._diag_pub.publish(msg)
self._last_publish_time = rospy.get_time()
##\todo Need to check HD input/output too using iostat
if __name__ == '__main__':
    # Entry point: parse options, start the monitor, and publish at ~1 Hz
    # until ROS shuts down. (Python 2 script: print statement and
    # `except Exception, e` syntax below.)
    hostname = socket.gethostname()

    import optparse
    parser = optparse.OptionParser(usage="usage: hd_monitor.py [--diag-hostname=cX]")
    parser.add_option("--diag-hostname", dest="diag_hostname",
                      help="Computer name in diagnostics output (ex: 'c1')",
                      metavar="DIAG_HOSTNAME",
                      action="store", default = hostname)
    options, args = parser.parse_args(rospy.myargv())

    # Optional first positional argument: directory whose disk usage to watch.
    home_dir = ''
    if len(args) > 1:
        home_dir = args[1]

    try:
        rospy.init_node('hd_monitor_%s' % hostname)
    except rospy.exceptions.ROSInitException:
        print 'HD monitor is unable to initialize node. Master may not be running.'
        sys.exit(0)

    # NOTE(review): this rebinding shadows the hd_monitor class name; harmless
    # here since the class is not instantiated again afterwards.
    hd_monitor = hd_monitor(hostname, options.diag_hostname, home_dir)
    rate = rospy.Rate(1.0)

    try:
        while not rospy.is_shutdown():
            rate.sleep()
            hd_monitor.publish_stats()
    except KeyboardInterrupt:
        pass
    except Exception, e:
        traceback.print_exc()

    # Stop the sampling timers before exiting.
    hd_monitor.cancel_timers()
    sys.exit(0)
| |
import csv
from collections import defaultdict
import json
import urllib
from datetime import datetime
# from difflib import SequenceMatcher
import codecs
import os
SCRIPT_DIR = os.path.dirname(__file__)
def update_2016_fpl_data():
    """Download 2016-17 FPL bootstrap data plus per-player detail and save it.

    Fetches the bootstrap-static blob, then augments each element (player)
    with its element-summary payload under the 'player_data' key, and writes
    the combined structure to fpl_2016_data.json in the current directory.
    Network I/O: requires fantasy.premierleague.com to be reachable.
    """
    master_url = 'https://fantasy.premierleague.com/drf/bootstrap-static'
    master_data = json.loads(urllib.urlopen(master_url).read())
    print("master data loaded")

    for i, player in enumerate(master_data['elements']):
        print('pulling data for player %s' % (i))
        player_url = 'https://fantasy.premierleague.com/drf/element-summary/%s' % (player['id'])
        detail = json.loads(urllib.urlopen(player_url).read())
        master_data['elements'][i]['player_data'] = detail

    with open('fpl_2016_data.json', 'w') as f:
        f.write(json.dumps(master_data))
def create_table_dataset():
    """Build per-team, per-season running league-table aggregates.

    Reads england_2013-2015_fixtures.csv and writes table_data.json of form -
    {
        "Liverpool":
        {
            "2013": [
                {
                    "week": 1,
                    "total_points": 0,
                    "total_goals_scored": 0,
                    "total_goals_conceded": 0,
                    "total_clean_sheets": 0,
                    "at_home": True,
                },
                ...
            ]
            ...
        },
        ...
    }
    Each list entry is the team's cumulative season totals after that week.
    """
    with open('england_2013-2015_fixtures.csv', 'r') as read_file:
        # create list of fixtures per team
        fixtures_dict = defaultdict(list)
        next(read_file)  # skip the CSV header row
        for line in read_file:
            # Naive CSV parsing: split on commas and strip surrounding quotes.
            entries = [entry.strip().strip('"') for entry in line.split(',')]
            date = datetime.strptime(entries[0], '%Y-%m-%d')
            season = int(entries[1])
            home_team = entries[2]
            away_team = entries[3]
            home_goals = int(entries[4])
            away_goals = int(entries[5])

            # League points awarded as (home, away): win 3, draw 1, loss 0.
            if home_goals > away_goals:
                points_tuple = (3, 0)
            elif home_goals == away_goals:
                points_tuple = (1, 1)
            else:
                points_tuple = (0, 3)

            # One entry per team per fixture, from that team's perspective.
            home_entry = {
                'date': date,
                'season': season,
                'opponent': away_team,
                'is_home': True,
                'goals_scored': home_goals,
                'goals_conceded': away_goals,
                'points_taken': points_tuple[0]
            }
            away_entry = {
                'date': date,
                'season': season,
                'opponent': home_team,
                'is_home': False,
                'goals_scored': away_goals,
                'goals_conceded': home_goals,
                'points_taken': points_tuple[1]
            }
            fixtures_dict[home_team].append(home_entry)
            fixtures_dict[away_team].append(away_entry)
    # print fixtures_dict['Liverpool']

    teams_dataset_dict = {}
    for team, fixtures in fixtures_dict.iteritems():
        seasons_dict = defaultdict(list)
        # Process fixtures chronologically so the totals accumulate in order.
        sorted_fixtures = sorted(fixtures, key=lambda k: k['date'])
        i = 0
        total_points = 0
        total_clean_sheets = 0
        total_goals_scored = 0
        total_goals_conceded = 0
        previous_season = 2012  # sentinel before the first (2013) season
        for result in sorted_fixtures:
            season = result['season']
            # Reset the running totals at each season boundary.
            if previous_season != season:
                i = 0
                total_points = 0
                total_goals_scored = 0
                total_goals_conceded = 0
                total_clean_sheets = 0
                previous_season = season
            total_points += result['points_taken']
            total_goals_scored += result['goals_scored']
            total_goals_conceded += result['goals_conceded']
            if result['goals_conceded'] == 0:
                total_clean_sheets += 1
            i += 1
            week = i  # 1-based matchweek within the season
            entry = {
                "week": week,
                "total_points": total_points,
                "total_goals_scored": total_goals_scored,
                "total_goals_conceded": total_goals_conceded,
                "total_clean_sheets": total_clean_sheets,
                "at_home": result['is_home']
            }
            seasons_dict[season].append(entry)
        teams_dataset_dict[team] = seasons_dict

    # Integer season keys become strings in the JSON (readers use "2014").
    teams_dataset_json = json.dumps(teams_dataset_dict)
    with open('table_data.json', 'w') as f:
        f.write(teams_dataset_json)
    # print teams_dataset_dict['Liverpool'][2014][37]
def create_players_static_dataset_2014():
    """Join 2014 FPL players with their 2013 totals; write the result to JSON.

    Produces 2014_static_player_dataset.json mapping 2014 player id to:
    {
        'name': name,
        'position': position,
        'team_name': team_name,
        'points_last_season': points_last_season (None if not in 2013 roster),
        'minutes_last_season': minutes_last_season (None if not in 2013 roster),
    }
    Matching is by case-insensitive exact name; each 2013 player can be
    matched at most once (matched rows are popped from the 2013 list).
    """
    # load 2014 fpl player data
    fpl14_player_list = []
    with codecs.open('FPL2014/Player_Details.csv', 'r', encoding='utf-8', errors='replace') as f:
        next(f)  # skip header
        for line in f:
            entries = [entry.strip().strip('"') for entry in line.split(',')]
            fpl14_player_list.append(entries)

    # load 2013 fpl player data
    fpl13_player_list = []
    with codecs.open('FPL2013/FPL2013_player_data.csv', 'r', encoding='utf-8', errors='replace') as f:
        next(f)  # skip header
        for line in f:
            entries = [entry.strip().strip('"') for entry in line.split(',')]
            fpl13_player_list.append(entries)

    player_dict = defaultdict(dict)
    print len(fpl14_player_list)
    for player_entry in fpl14_player_list:
        # print player_entry
        # 2014 row layout: [0]=id, [1]=name, [2]=team, [3]=position.
        # NOTE(review): `id` shadows the builtin; harmless locally.
        id = int(player_entry[0])
        name = player_entry[1]
        print name
        # name = name.decode('string_escape')
        # name = name.encode('utf8')
        # print name
        team_name = player_entry[2]
        position = player_entry[3]

        # Linear scan for a name match; if several rows match, the LAST one
        # wins (index keeps being overwritten).
        i = 0
        index = -1
        for player_13 in fpl13_player_list:
            if name.lower().strip() == player_13[0].lower().strip():
                index = i
            i += 1
        if index == -1:
            points_last_season = None
            minutes_last_season = None
        else:
            # Pop so the same 2013 row cannot match two 2014 players.
            # Assumes 2013 row layout [6]=points, [2]=minutes — TODO confirm
            # against the FPL2013 CSV header.
            matched_player = fpl13_player_list.pop(index)
            points_last_season = matched_player[6]
            minutes_last_season = matched_player[2]
        player_dict[id] = {
            'name': name,
            'team_name': team_name,
            'position': position,
            'points_last_season': points_last_season,
            'minutes_last_season': minutes_last_season,
        }

    player_dict_json = json.dumps(player_dict)
    with open('2014_static_player_dataset.json', 'w') as f:
        f.write(player_dict_json)
def get_average_points_per_minute_dict(players_static_data):
    """Return average fantasy points per minute played last season, by position.

    Args:
        players_static_data: dict mapping player id -> stats dict with keys
            'position', 'points_last_season', 'minutes_last_season'. Players
            whose 'points_last_season' is falsy (None/''/'0') are skipped.

    Returns:
        dict with keys 'Goalkeeper', 'Defender', 'Midfielder', 'Forward'
        mapping to total points / total minutes over the included players.

    Fix vs. original: a position with zero recorded minutes now yields 0.0
    instead of raising ZeroDivisionError.
    """
    # position -> [total points last season, total minutes last season]
    totals = {
        'Goalkeeper': [0.0, 0.0],
        'Defender': [0.0, 0.0],
        'Midfielder': [0.0, 0.0],
        'Forward': [0.0, 0.0],
    }
    for stats in players_static_data.values():
        if not stats['points_last_season']:
            continue  # no prior-season data for this player
        position = stats['position']
        if position not in totals:
            print("Something is wrong. Check player static data positions.")
            continue
        totals[position][0] += float(stats['points_last_season'])
        totals[position][1] += float(stats['minutes_last_season'])

    average_ppmls_dict = {}
    for position, (points, minutes) in totals.items():
        # Guard against a position with no minutes at all (was a crash).
        average_ppmls_dict[position] = points / minutes if minutes else 0.0
    return average_ppmls_dict
def create_data_points_2014():
    """Build per-player, per-gameweek ML samples for the 2014 season.

    Reads 2014_static_player_dataset.json, table_data.json and
    FPL2014/Player_Data.csv (all relative to SCRIPT_DIR) and writes
    player_ml_data.json mapping player id -> {'meta': ..., 'data': {week:
    {'X': features, 'Y': {'points_scored': ...}}}} for weeks 4..38.

    Features ('X') only use information available before the target week:
    season-to-date per-played-match averages, 3-week form, previous-week
    team/opponent aggregates and price; 'Y' is the points scored that week.

    Bug fix vs. original: the "not played a single match yet" branch used a
    bare `next`, which is a no-op expression rather than `continue`, so such
    weeks leaked feature rows full of None into the output. Those weeks are
    now skipped, and the now-dead None-template dict was removed.
    """
    # Final 2013-14 league points per match, used as prior-season strength.
    teams_points_per_match_2013 = {
        'Arsenal': float(79) / 38,
        'Aston Villa': float(38) / 38,
        'Burnley': float(33) / 38,
        'Chelsea': float(82) / 38,
        'Everton': float(72) / 38,
        'Crystal Palace': float(45) / 38,
        'Hull': float(37) / 38,
        'Leicester': float(33) / 38,
        'Liverpool': float(84) / 38,
        'Man City': float(86) / 38,
        'Man Utd': float(64) / 38,
        'Newcastle': float(49) / 38,
        'QPR': float(33) / 38,
        'Southampton': float(56) / 38,
        'Spurs': float(69) / 38,
        'Stoke': float(50) / 38,
        'Sunderland': float(38) / 38,
        'Swansea': float(42) / 38,
        'West Brom': float(36) / 38,
        'West Ham': float(40) / 38,
    }

    f_path = os.path.join(SCRIPT_DIR, '2014_static_player_dataset.json')
    with open(f_path, 'r') as f:
        players_static_data = json.load(f)
    average_ppmls_dict = get_average_points_per_minute_dict(players_static_data)

    f_path = os.path.join(SCRIPT_DIR, 'table_data.json')
    with open(f_path, 'r') as f:
        table_data = json.load(f)

    # match_data_dict[player_id][week] -> raw per-week stats plus context.
    match_data_dict = defaultdict(dict)
    f_path = os.path.join(SCRIPT_DIR, 'FPL2014/Player_Data.csv')
    with open(f_path, 'r') as f:
        next(f)  # skip header
        for line in f:
            entries = [entry.strip().strip('"') for entry in line.split(',')]
            week = int(entries[1])
            player_id = entries[0]

            # Points per minute last season; brand-new players fall back to
            # the league-wide average for their position.
            minutes_last_season = players_static_data[player_id]['minutes_last_season']
            points_last_season = players_static_data[player_id]['points_last_season']
            if points_last_season:
                minutes_last_season = int(minutes_last_season)
                points_last_season = int(points_last_season)
                if minutes_last_season != 0:
                    last_season_points_per_minutes = float(points_last_season) / minutes_last_season
                else:
                    # player present but did not play last season
                    last_season_points_per_minutes = 0
            else:
                # new player this season, assign average values for last season
                position = players_static_data[player_id]['position']
                last_season_points_per_minutes = average_ppmls_dict[position]

            team = players_static_data[player_id]['team_name']
            team_points_per_match_last_season = teams_points_per_match_2013[team]
            opponent_name = entries[2]
            opponent_points_per_match_last_season = teams_points_per_match_2013[opponent_name]
            price = entries[-2]

            # Season-to-date aggregates for the player's team as of this week.
            team_table_data = table_data[team]["2014"]
            aggregate_data = team_table_data[week - 1]
            team_points_per_match = float(aggregate_data['total_points']) / (week)
            team_goals_scored_per_match = float(aggregate_data['total_goals_scored']) / (week)
            team_goals_conceded_per_match = float(aggregate_data['total_goals_conceded']) / (week)
            is_at_home = team_table_data[week - 1]['at_home']

            # Same aggregates for the opponent.
            oppn_table_data = table_data[opponent_name]["2014"]
            aggregate_data = oppn_table_data[week - 1]
            opponent_points_per_match = float(aggregate_data['total_points']) / (week)
            opponent_goals_scored_per_match = float(aggregate_data['total_goals_scored']) / (week)
            opponent_goals_conceded_per_match = float(aggregate_data['total_goals_conceded']) / (week)

            match_data_dict[player_id][week] = {
                'last_season_points_per_minutes': last_season_points_per_minutes,
                'team_points_per_match_last_season': team_points_per_match_last_season,
                'opponent_points_per_match_last_season': opponent_points_per_match_last_season,
                'price': price,
                'team_points_per_match': team_points_per_match,
                'team_goals_scored_per_match': team_goals_scored_per_match,
                'team_goals_conceded_per_match': team_goals_conceded_per_match,
                'is_at_home': is_at_home,
                'opponent_points_per_match': opponent_points_per_match,
                'opponent_goals_scored_per_match': opponent_goals_scored_per_match,
                'opponent_goals_conceded_per_match': opponent_goals_conceded_per_match,
                'minutes_played': int(entries[4]),
                'goals_scored': int(entries[5]),
                'assists': int(entries[6]),
                'clean_sheets': int(entries[7]),
                'goals_conceded': int(entries[8]),
                'yellow_cards': int(entries[9]),
                'red_cards': int(entries[10]),
                'saves': int(entries[11]),
                'bps': int(entries[14]),
                'net_transfers': int(entries[15]),
                'points': int(entries[17]),
            }

    # Clean player_data: drop players with fewer than 35 recorded gameweeks;
    # for the rest, fill missing weeks with the previous week's data, falling
    # back to the next week, then an arbitrary existing week.
    deleted_player_list = []
    for key, value in match_data_dict.items():
        if len(value) < 35:
            deleted_player_list.append(key)
        else:
            for i in range(1, 39, 1):
                if i not in value:
                    try:
                        match_data_dict[key][i] = match_data_dict[key][i - 1]
                    except KeyError:
                        if i + 1 in value:
                            match_data_dict[key][i] = match_data_dict[key][i + 1]
                        else:
                            # NOTE(review): min() keyed on the week dicts
                            # effectively picks an arbitrary existing week
                            # (dict ordering comparison is Python 2 only).
                            index = min(value, key=value.get)
                            match_data_dict[key][i] = match_data_dict[key][index]
    for player_id in deleted_player_list:
        del match_data_dict[player_id]

    # Derived features per week.
    data_dict = defaultdict(dict)
    for key, value in match_data_dict.items():
        total_minutes = 0.0
        total_points = 0.0
        total_goals_scored = 0.0
        total_assists = 0.0
        total_goals_conceded = 0.0
        total_clean_sheets = 0.0
        total_yellow_cards = 0.0
        total_red_cards = 0.0
        total_saves = 0.0
        total_bps = 0.0
        total_net_transfers = 0.0
        total_matches_played = 0.0
        data_dict_per_week = defaultdict(dict)
        data_dict[key] = {
            'meta': {
                'position': players_static_data[key]['position'].lower(),
                'team': players_static_data[key]['team_name'],
                'name': players_static_data[key]['name']
            },
            'data': data_dict_per_week,
        }

        # cumulative_stat_dict[i]: running season totals after week i.
        cumulative_stat_dict = {}
        for i in range(1, 39, 1):
            if i in value:
                week_stats = value[i]
                total_minutes += week_stats['minutes_played']
                total_points += week_stats['points']
                total_goals_scored += week_stats['goals_scored']
                total_assists += week_stats['assists']
                total_goals_conceded += week_stats['goals_conceded']
                total_clean_sheets += week_stats['clean_sheets']
                total_yellow_cards += week_stats['yellow_cards']
                total_red_cards += week_stats['red_cards']
                total_saves += week_stats['saves']
                total_bps += week_stats['bps']
                total_net_transfers += week_stats['net_transfers']
                # A match counts as played only with non-zero minutes.
                total_matches_played += 1 if week_stats['minutes_played'] > 0 else 0
            cumulative_stat_dict[i] = {
                'total_minutes': total_minutes,
                'total_points': total_points,
                'total_goals_scored': total_goals_scored,
                'total_assists': total_assists,
                'total_goals_conceded': total_goals_conceded,
                'total_clean_sheets': total_clean_sheets,
                'total_yellow_cards': total_yellow_cards,
                'total_red_cards': total_red_cards,
                'total_saves': total_saves,
                'total_bps': total_bps,
                'total_net_transfers': total_net_transfers,
                'total_matches_played': total_matches_played,
            }

        # Samples start at week 4 so that 3 weeks of form data exist.
        for i in range(4, 39, 1):
            cumulative_stats = cumulative_stat_dict[i - 1]
            total_matches_played = cumulative_stats['total_matches_played']
            if total_matches_played == 0:
                # Not played a single match yet: discard this week.
                # (Fix: the original used a bare `next` here, a no-op.)
                continue

            # Season-to-date per-played-match averages (as of week i-1).
            dict_entry = {
                'minutes_per_match_played': cumulative_stats['total_minutes'] / total_matches_played,
                'points_per_match_played': cumulative_stats['total_points'] / total_matches_played,
                'goals_scored_per_match_played': cumulative_stats['total_goals_scored'] / total_matches_played,
                'assists_per_match_played': cumulative_stats['total_assists'] / total_matches_played,
                'goals_conceded_per_match_played': cumulative_stats['total_goals_conceded'] / total_matches_played,
                'clean_sheets_per_match_played': cumulative_stats['total_clean_sheets'] / total_matches_played,
                'yellow_cards_per_match_played': cumulative_stats['total_yellow_cards'] / total_matches_played,
                'red_cards_per_match_played': cumulative_stats['total_red_cards'] / total_matches_played,
                'saves_per_match_played': cumulative_stats['total_saves'] / total_matches_played,
                'bps_per_match_played': cumulative_stats['total_bps'] / total_matches_played,
                'net_transfers_per_match_played': cumulative_stats['total_net_transfers'] / total_matches_played,
            }

            # Totals as of 3 weeks earlier (all zeros for the first sample week).
            cumulative_stat_dict_3_weeks_ago = {
                'total_minutes': 0,
                'total_points': 0,
                'total_goals_scored': 0,
                'total_assists': 0,
                'total_goals_conceded': 0,
                'total_clean_sheets': 0,
                'total_yellow_cards': 0,
                'total_red_cards': 0,
                'total_saves': 0,
                'total_bps': 0,
                'total_net_transfers': 0,
                'total_matches_played': 0,
            }
            if i > 4:
                cumulative_stat_dict_3_weeks_ago = cumulative_stat_dict[i - 4]
            total_matches_played_3_weeks_ago = cumulative_stat_dict_3_weeks_ago['total_matches_played']
            matches_played_in_last_3_weeks = total_matches_played - total_matches_played_3_weeks_ago

            if matches_played_in_last_3_weeks > 0:
                def _form(stat_key):
                    # Per-played-match average of a stat over the last 3 weeks.
                    return (cumulative_stats[stat_key] -
                            cumulative_stat_dict_3_weeks_ago[stat_key]) / matches_played_in_last_3_weeks
                dict_entry['avg_points_form'] = _form('total_points')
                dict_entry['avg_minutes_form'] = _form('total_minutes')
                dict_entry['avg_goals_scored_form'] = _form('total_goals_scored')
                dict_entry['avg_assists_form'] = _form('total_assists')
                dict_entry['avg_goals_conceded_form'] = _form('total_goals_conceded')
                dict_entry['avg_clean_sheets_form'] = _form('total_clean_sheets')
                dict_entry['avg_yellow_cards_form'] = _form('total_yellow_cards')
                dict_entry['avg_red_cards_form'] = _form('total_red_cards')
                dict_entry['avg_saves_form'] = _form('total_saves')
                dict_entry['avg_bps_form'] = _form('total_bps')
                dict_entry['avg_net_transfers_form'] = _form('total_net_transfers')
            else:
                # not played for last 3 weeks. put season averages instead
                dict_entry['avg_points_form'] = dict_entry['points_per_match_played']
                dict_entry['avg_minutes_form'] = dict_entry['minutes_per_match_played']
                dict_entry['avg_goals_scored_form'] = dict_entry['goals_scored_per_match_played']
                dict_entry['avg_assists_form'] = dict_entry['assists_per_match_played']
                dict_entry['avg_goals_conceded_form'] = dict_entry['goals_conceded_per_match_played']
                dict_entry['avg_clean_sheets_form'] = dict_entry['clean_sheets_per_match_played']
                dict_entry['avg_yellow_cards_form'] = dict_entry['yellow_cards_per_match_played']
                dict_entry['avg_red_cards_form'] = dict_entry['red_cards_per_match_played']
                dict_entry['avg_saves_form'] = dict_entry['saves_per_match_played']
                dict_entry['avg_bps_form'] = dict_entry['bps_per_match_played']
                dict_entry['avg_net_transfers_form'] = dict_entry['net_transfers_per_match_played']

            # Context features come from the previous week; home/away is for
            # the week being predicted.
            dict_entry['last_season_points_per_minutes'] = float(value[i - 1]['last_season_points_per_minutes'])
            dict_entry['team_points_per_match_last_season'] = float(value[i - 1]['team_points_per_match_last_season'])
            dict_entry['opponent_points_per_match_last_season'] = float(value[i - 1]['opponent_points_per_match_last_season'])
            dict_entry['price'] = float(value[i - 1]['price'])
            dict_entry['price_change_form'] = float(value[i - 1]['price']) - float(value[i - 3]['price'])
            dict_entry['team_points_per_match'] = float(value[i - 1]['team_points_per_match'])
            dict_entry['team_goals_scored_per_match'] = float(value[i - 1]['team_goals_scored_per_match'])
            dict_entry['team_goals_conceded_per_match'] = float(value[i - 1]['team_goals_conceded_per_match'])
            dict_entry['is_at_home'] = 1 if value[i]['is_at_home'] else 0
            dict_entry['opponent_points_per_match'] = float(value[i - 1]['opponent_points_per_match'])
            dict_entry['opponent_goals_scored_per_match'] = float(value[i - 1]['opponent_goals_scored_per_match'])
            dict_entry['opponent_goals_conceded_per_match'] = float(value[i - 1]['opponent_goals_conceded_per_match'])

            data_dict[key]['data'][i] = {
                'X': dict_entry,
                'Y': {'points_scored': float(value[i]['points'])}
            }

    f_path = os.path.join(SCRIPT_DIR, 'player_ml_data.json')
    with open(f_path, 'w') as f:
        f.write(json.dumps(data_dict))
'''
def create_player_dataset_current():
"""creates a json list with following values per player for each player in fpl 2016 db
id,
points_last_season (None if not in last season roster),
minutes_last_season (None if not in 2013 roster),
first_name,
second_name,
position,
team_id,
team_name,
team_shortname,
"""
# load 2106 fpl player data
url = 'https://fantasy.premierleague.com/drf/bootstrap-static'
response = urllib.urlopen(url)
fpl_data = json.load(response)
fpl16_player_data = fpl_data['elements']
# fpl16_team_data = fpl_data['teams']
fpl16_player_position_key = fpl_data['element_types']
position_key_dict = {}
for type_dict in fpl16_player_position_key:
position_key_dict[type_dict['id']] = type_dict['singular_name']
fpl16_team_key = fpl_data['teams']
team_list = []
for team in teams:
fpl16_id = team['id']
different_names = {
'Hull': 'Hull City',
'Leicester': 'Leicester City',
'Man Utd': 'Manchester United',
'Swansea': 'Swansea City',
'Spurs': 'Tottenham Hotspurs',
'Stoke': 'Stoke City',
'West Brom': 'West Bromwich Albion',
'West Ham': 'West Ham United'
}
''''''
absent_in_2014 = [
'Bournemouth',
'Middlesbrough',
'Watford',
]
''''''
team_name = team['name']
fpl16_name = team_name if team_name not in different_names else different_names[team_name]
fpl16_shortname = team['short_name']
team_list.append[fpl16_id, fpl16_name, fpl16_shortname]
# ambiguous_list = []
# match both set of players through name, (if multiple) then position, (if multiple) then team
for player16 in fpl16_player_data:
name16 = (player16['first_name'] + ' ' + player16['second_name']).lower()
matches = []
for player14 in fpl14_player_list:
name14 = player14[1].lower()
match_ratio = SequenceMatcher(None, name16, name14).ratio()
if match_ratio > 0.8:
matches.append({
'id': int(player14[0]),
'name': player14[1],
'team': player14[2],
'position': player14[3],
'match_ratio': match_ratio,
})
if len(matches) == 0:
# no match found
id_in_fpl14 = None
team_fpl14 = None
elif len(matches) == 1:
# exactly 1 match found
id_in_fpl14 = matches[0]['id']
else:
# multiple matched found
sorted_matches = sorted(matches, key=lambda k: k['match_ratio'], reverse=True)
if sorted_matches[0]['match_ratio'] > 0.95 and sorted_matches[1]['match_ratio'] <= 0.95:
# first assumed as perfect match
id_in_fpl14 = sorted_matches[0]['id']
else:
matches_with_same_position = []
for match in sorted_matches:
if position_key_dict[player16['element_type']] == match['position']:
matches_with_same_position.append(match)
number_of_matches = len(matches_with_same_position)
if number_of_matches == 0:
id_in_fpl14 = None
elif number_of_matches == 1:
id_in_fpl14 = matches_with_same_position[0]['id']
else:
# still multiple matches, assign highest
id_in_fpl14 = matches_with_same_position[0]['id']
# ambiguous_list.append({'name16': name16, 'matches': matches_with_same_position})
pass
'''
def load_dataset(position='midfielder'):
    """Load per-week ML samples for every player of the given position.

    Reads player_ml_data.json (next to this script), flattens the weekly
    samples into parallel feature/target lists, and writes the feature-name
    legend to X_legend.json as a side effect.

    Returns (X_list, Y_list, X_legend, Y_legend).
    """
    f_path = os.path.join(SCRIPT_DIR, 'player_ml_data.json')
    with open(f_path, 'r') as f:
        data_dict = json.load(f)

    X_legend = None
    Y_legend = None
    X_list = []
    Y_list = []
    wanted = position.lower()
    for player_id, record in data_dict.items():
        if record['meta']['position'].lower() != wanted:
            continue
        for week, sample in record['data'].items():
            # Capture the legends once, from the first sample seen.
            if not X_legend:
                X_legend = list(sample['X'].keys())
            if not Y_legend:
                Y_legend = list(sample['Y'].keys())[0]
            X_list.append(list(sample['X'].values()))
            Y_list.append(sample['Y'][Y_legend])

    # Persist the feature ordering so downstream consumers can label columns.
    legend_path = os.path.join(SCRIPT_DIR, 'X_legend.json')
    with open(legend_path, 'w') as f:
        f.write(json.dumps(X_legend))
    return X_list, Y_list, X_legend, Y_legend
def create_dataset_ssv_file(position='midfielder'):
    """Dump the feature matrix and targets for *position* to a
    space-separated file named fpl_<position>s.ssv (one sample per line,
    features followed by the target)."""
    features, targets, _, _ = load_dataset(position=position)
    out_name = 'fpl_%ss.ssv' % (position.lower())
    with open(out_name, 'w') as out:
        for row, target in zip(features, targets):
            joined = ' '.join(str(v) for v in row)
            out.write(joined + ' ' + str(target) + '\n')
# one time functions
def create_fixture_history_csv():
    """One-time helper: extract 2013+ top-flight fixtures from
    england_master.csv into england_2013-2015_fixtures.csv.

    Keeps rows where season >= 2013, division == 1 and tier == 1; rows whose
    season/division/tier fields are not integers (e.g. the header) are
    silently skipped. Output columns: date, season, home, visitor,
    home_goals, visitor_goals (all quoted).
    """
    header = ["date", "season", "home", "visitor", "home_goals", "visitor_goals"]
    with open('england_master.csv', 'r') as read_file, open('england_2013-2015_fixtures.csv', 'w') as write_file:
        wr = csv.writer(write_file, quoting=csv.QUOTE_ALL)
        wr.writerow(header)
        for line in read_file:
            entries = [entry.strip().strip('"') for entry in line.split(',')]
            try:
                # Short-circuit keeps the original behavior: later fields are
                # only parsed when the season filter passes.
                if int(entries[1]) >= 2013 and int(entries[7]) == 1 and int(entries[8]) == 1:
                    wr.writerow([entries[0], entries[1], entries[2], entries[3], entries[5], entries[6]])
            except ValueError:
                continue
| |
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
import pytest
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils._testing import assert_array_equal, assert_array_less
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.base import BaseEstimator
from sklearn.base import clone
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble._weight_boosting import _samme_proba
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn.utils._mocking import NoSampleWeightWrapper
from sklearn import datasets
# Common random state shared by all dataset shuffles below.
rng = np.random.RandomState(0)
# Toy sample: two well-separated clusters of three points each.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1]  # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
# Held-out points and their expected labels/targets.
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
# NOTE(review): `perm` is never used below, but computing it advances `rng`,
# which changes the subsequent `shuffle` results — do not remove casually.
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the diabetes dataset and randomly permute it
diabetes = datasets.load_diabetes()
diabetes.data, diabetes.target = shuffle(
    diabetes.data, diabetes.target, random_state=rng
)
def test_samme_proba():
    """Exercise the `_samme_proba` helper with pathological probabilities."""
    # Deliberately bad `predict_proba` output: zeros, negatives, tiny values.
    probs = np.array(
        [[1, 1e-6, 0], [0.19, 0.6, 0.2], [-999, 0.51, 0.5], [1e-6, 1, 1e-9]]
    )
    probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]

    # Mock estimator so the returned probabilities are fully controlled.
    class MockEstimator:
        def predict_proba(self, X):
            assert_array_equal(X.shape, probs.shape)
            return probs

    samme_proba = _samme_proba(MockEstimator(), 3, np.ones_like(probs))

    assert_array_equal(samme_proba.shape, probs.shape)
    assert np.isfinite(samme_proba).all()

    # `_samme_proba` must preserve the per-sample ordering of the inputs.
    assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
    assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
    """predict_proba must be robust when the training data has one class.

    Regression test for
    https://github.com/scikit-learn/scikit-learn/issues/7501
    """
    y_single_class = np.ones(len(X))
    model = AdaBoostClassifier().fit(X, y_single_class)
    assert_array_almost_equal(model.predict_proba(X), np.ones((len(X), 1)))
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_classification_toy(algorithm):
    """Fit on the toy sample and check predictions and output shapes."""
    model = AdaBoostClassifier(algorithm=algorithm, random_state=0)
    model.fit(X, y_class)

    assert_array_equal(model.predict(T), y_t_class)
    assert_array_equal(np.unique(np.asarray(y_t_class)), model.classes_)
    assert model.predict_proba(T).shape == (len(T), 2)
    assert model.decision_function(T).shape == (len(T),)
def test_regression_toy():
    # Check regression on a toy dataset.
    reg = AdaBoostRegressor(random_state=0)
    reg.fit(X, y_regr)
    assert_array_equal(reg.predict(T), y_t_regr)
def test_iris():
    """Check consistency on the iris dataset for both boosting algorithms."""
    classes = np.unique(iris.target)
    clf_samme = prob_samme = None

    for algorithm in ["SAMME", "SAMME.R"]:
        model = AdaBoostClassifier(algorithm=algorithm)
        model.fit(iris.data, iris.target)
        assert_array_equal(classes, model.classes_)

        proba = model.predict_proba(iris.data)
        if algorithm == "SAMME":
            # Keep the SAMME model/probabilities for the regression check below.
            clf_samme = model
            prob_samme = proba
        assert proba.shape[1] == len(classes)
        assert model.decision_function(iris.data).shape[1] == len(classes)

        score = model.score(iris.data, iris.target)
        assert score > 0.9, "Failed with algorithm %s and score = %f" % (
            algorithm,
            score,
        )

        # The ensemble must actually boost (more than one estimator) ...
        assert len(model.estimators_) > 1
        # ... and each estimator gets a distinct random state (issue #7408).
        states = set(est.random_state for est in model.estimators_)
        assert len(states) == len(model.estimators_)

    # Somewhat hacky regression test: prior to
    # ae7adc880d624615a34bafdb1d75ef67051b8200,
    # predict_proba returned SAMME.R values for SAMME.
    clf_samme.algorithm = "SAMME.R"
    assert_array_less(0, np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
@pytest.mark.parametrize("loss", ["linear", "square", "exponential"])
def test_diabetes(loss):
    """Check consistency on the diabetes dataset for every loss function."""
    model = AdaBoostRegressor(loss=loss, random_state=0)
    model.fit(diabetes.data, diabetes.target)
    assert model.score(diabetes.data, diabetes.target) > 0.6

    # The ensemble must actually boost (more than one estimator) ...
    assert len(model.estimators_) > 1
    # ... and each estimator gets a distinct random state (issue #7408).
    states = set(est.random_state for est in model.estimators_)
    assert len(states) == len(model.estimators_)
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_staged_predict(algorithm):
    """Staged outputs must have one entry per estimator and converge to the
    final (non-staged) predictions/probabilities/scores."""
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    diabetes_weights = rng.randint(10, size=diabetes.target.shape)

    # AdaBoost classification.
    clf = AdaBoostClassifier(algorithm=algorithm, n_estimators=10)
    clf.fit(iris.data, iris.target, sample_weight=iris_weights)

    predictions = clf.predict(iris.data)
    staged_predictions = list(clf.staged_predict(iris.data))
    proba = clf.predict_proba(iris.data)
    staged_probas = list(clf.staged_predict_proba(iris.data))
    score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
    staged_scores = list(
        clf.staged_score(iris.data, iris.target, sample_weight=iris_weights)
    )

    assert len(staged_predictions) == 10
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert len(staged_probas) == 10
    assert_array_almost_equal(proba, staged_probas[-1])
    assert len(staged_scores) == 10
    assert_array_almost_equal(score, staged_scores[-1])

    # AdaBoost regression.
    reg = AdaBoostRegressor(n_estimators=10, random_state=0)
    reg.fit(diabetes.data, diabetes.target, sample_weight=diabetes_weights)

    predictions = reg.predict(diabetes.data)
    staged_predictions = list(reg.staged_predict(diabetes.data))
    score = reg.score(diabetes.data, diabetes.target, sample_weight=diabetes_weights)
    staged_scores = list(
        reg.staged_score(
            diabetes.data, diabetes.target, sample_weight=diabetes_weights
        )
    )

    assert len(staged_predictions) == 10
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert len(staged_scores) == 10
    assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
    """Base trees must be reachable via the `base_estimator__` param prefix."""
    # AdaBoost classification.
    search = GridSearchCV(
        AdaBoostClassifier(base_estimator=DecisionTreeClassifier()),
        {
            "n_estimators": (1, 2),
            "base_estimator__max_depth": (1, 2),
            "algorithm": ("SAMME", "SAMME.R"),
        },
    )
    search.fit(iris.data, iris.target)

    # AdaBoost regression.
    search = GridSearchCV(
        AdaBoostRegressor(base_estimator=DecisionTreeRegressor(), random_state=0),
        {"n_estimators": (1, 2), "base_estimator__max_depth": (1, 2)},
    )
    search.fit(diabetes.data, diabetes.target)
def test_pickle():
    """Fitted ensembles must survive a pickle round-trip with equal scores."""
    import pickle

    # Adaboost classifier, both algorithms.
    for algorithm in ["SAMME", "SAMME.R"]:
        original = AdaBoostClassifier(algorithm=algorithm)
        original.fit(iris.data, iris.target)
        restored = pickle.loads(pickle.dumps(original))
        assert type(restored) == original.__class__
        assert original.score(iris.data, iris.target) == restored.score(
            iris.data, iris.target
        )

    # Adaboost regressor.
    original = AdaBoostRegressor(random_state=0)
    original.fit(diabetes.data, diabetes.target)
    restored = pickle.loads(pickle.dumps(original))
    assert type(restored) == original.__class__
    assert original.score(diabetes.data, diabetes.target) == restored.score(
        diabetes.data, diabetes.target
    )
def test_importances():
    """Informative features must dominate `feature_importances_`."""
    X_imp, y_imp = datasets.make_classification(
        n_samples=2000,
        n_features=10,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        shuffle=False,
        random_state=1,
    )

    for algorithm in ["SAMME", "SAMME.R"]:
        model = AdaBoostClassifier(algorithm=algorithm)
        model.fit(X_imp, y_imp)
        importances = model.feature_importances_

        assert importances.shape[0] == 10
        # With shuffle=False the three informative features come first and
        # must each outweigh every non-informative feature.
        assert (importances[:3, np.newaxis] >= importances[3:]).all()
def test_error():
    """Deficient input must raise ValueError."""
    # Invalid constructor parameters are rejected at fit time.
    for bad_model in (
        AdaBoostClassifier(learning_rate=-1),
        AdaBoostClassifier(algorithm="foo"),
    ):
        with pytest.raises(ValueError):
            bad_model.fit(X, y_class)

    # Mismatched sample_weight length is rejected as well.
    with pytest.raises(ValueError):
        AdaBoostClassifier().fit(X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
    """AdaBoost must accept a variety of base estimators."""
    from sklearn.ensemble import RandomForestClassifier

    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    AdaBoostClassifier(RandomForestClassifier()).fit(X, y_regr)
    AdaBoostClassifier(SVC(), algorithm="SAMME").fit(X, y_class)

    from sklearn.ensemble import RandomForestRegressor

    AdaBoostRegressor(RandomForestRegressor(), random_state=0).fit(X, y_regr)
    AdaBoostRegressor(SVR(), random_state=0).fit(X, y_regr)

    # An empty discrete ensemble must fail in fit, not in predict.
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    with pytest.raises(ValueError, match="worse than random"):
        AdaBoostClassifier(SVC(), algorithm="SAMME").fit(X_fail, y_fail)
def test_sample_weights_infinite():
    """An oversized learning rate must warn when sample weights blow up."""
    model = AdaBoostClassifier(
        n_estimators=30, learning_rate=5.0, algorithm="SAMME"
    )
    with pytest.warns(
        UserWarning, match="Sample weights have reached infinite values"
    ):
        model.fit(iris.data, iris.target)
def test_sparse_classification():
    """Sparse and dense input must train identical AdaBoost classifiers."""

    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Record the training data type for later verification."""
            super().fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_multilabel_classification(
        n_classes=1, n_samples=15, n_features=5, random_state=42
    )
    y = np.ravel(y)  # flatten y to a 1d array
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix, dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        def build_classifier():
            # Identical configuration for the sparse and dense fits.
            return AdaBoostClassifier(
                base_estimator=CustomSVC(probability=True),
                random_state=1,
                algorithm="SAMME",
            )

        sparse_classifier = build_classifier().fit(X_train_sparse, y_train)
        dense_classifier = build_classifier().fit(X_train, y_train)

        # Single-shot prediction APIs.
        assert_array_equal(
            sparse_classifier.predict(X_test_sparse),
            dense_classifier.predict(X_test),
        )
        assert_array_almost_equal(
            sparse_classifier.decision_function(X_test_sparse),
            dense_classifier.decision_function(X_test),
        )
        assert_array_almost_equal(
            sparse_classifier.predict_log_proba(X_test_sparse),
            dense_classifier.predict_log_proba(X_test),
        )
        assert_array_almost_equal(
            sparse_classifier.predict_proba(X_test_sparse),
            dense_classifier.predict_proba(X_test),
        )
        assert_array_almost_equal(
            sparse_classifier.score(X_test_sparse, y_test),
            dense_classifier.score(X_test, y_test),
        )

        # Staged (per-boosting-iteration) APIs.
        for sparse_res, dense_res in zip(
            sparse_classifier.staged_decision_function(X_test_sparse),
            dense_classifier.staged_decision_function(X_test),
        ):
            assert_array_almost_equal(sparse_res, dense_res)
        for sparse_res, dense_res in zip(
            sparse_classifier.staged_predict(X_test_sparse),
            dense_classifier.staged_predict(X_test),
        ):
            assert_array_equal(sparse_res, dense_res)
        for sparse_res, dense_res in zip(
            sparse_classifier.staged_predict_proba(X_test_sparse),
            dense_classifier.staged_predict_proba(X_test),
        ):
            assert_array_almost_equal(sparse_res, dense_res)
        for sparse_res, dense_res in zip(
            sparse_classifier.staged_score(X_test_sparse, y_test),
            dense_classifier.staged_score(X_test, y_test),
        ):
            assert_array_equal(sparse_res, dense_res)

        # Sparsity of the data must be maintained during training
        # (internally converted to CSC/CSR only).
        types = [est.data_type_ for est in sparse_classifier.estimators_]
        assert all(t == csc_matrix or t == csr_matrix for t in types)
def test_sparse_regression():
    """Check regression with sparse input.

    Sparse and dense training data must produce identical predictions, and
    training must preserve sparsity (internally CSC/CSR only).
    """

    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
            super().fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_regression(
        n_samples=15, n_features=50, n_targets=1, random_state=42
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix, dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_regressor = AdaBoostRegressor(
            base_estimator=CustomSVR(), random_state=1
        ).fit(X_train_sparse, y_train)

        # Trained on dense format.
        # Fix: the original chained the assignment through `dense_results`
        # (`dense_classifier = dense_results = AdaBoostRegressor(...)`),
        # leaving a misleading alias that was immediately shadowed below.
        dense_regressor = AdaBoostRegressor(
            base_estimator=CustomSVR(), random_state=1
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_regressor.predict(X_test_sparse)
        dense_results = dense_regressor.predict(X_test)
        assert_array_almost_equal(sparse_results, dense_results)

        # staged_predict
        sparse_results = sparse_regressor.staged_predict(X_test_sparse)
        dense_results = dense_regressor.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_almost_equal(sparse_res, dense_res)

        # Sparsity of the training data must be maintained.
        types = [est.data_type_ for est in sparse_regressor.estimators_]
        assert all([(t == csc_matrix or t == csr_matrix) for t in types])
def test_sample_weight_adaboost_regressor():
    """
    AdaBoostRegressor should work without sample_weights in the base estimator

    The random weighted sampling is done internally in the _boost method in
    AdaBoostRegressor.
    """

    class DummyEstimator(BaseEstimator):
        # Minimal estimator: no sample_weight parameter on fit at all.
        def fit(self, X, y):
            pass

        def predict(self, X):
            return np.zeros(X.shape[0])

    booster = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
    booster.fit(X, y_regr)
    assert len(booster.estimator_weights_) == len(booster.estimator_errors_)
def test_multidimensional_X():
    """
    Check that the AdaBoost estimators can work with n-dimensional
    data matrix
    """
    rng = np.random.RandomState(0)
    X_3d = rng.randn(50, 3, 3)
    y_classif = rng.choice([0, 1], 50)
    y_regress = rng.randn(50)

    # Classification: fit and both prediction APIs must accept 3-d input.
    booster = AdaBoostClassifier(DummyClassifier(strategy="most_frequent"))
    booster.fit(X_3d, y_classif)
    booster.predict(X_3d)
    booster.predict_proba(X_3d)

    # Regression likewise.
    booster = AdaBoostRegressor(DummyRegressor())
    booster.fit(X_3d, y_regress)
    booster.predict(X_3d)
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_adaboostclassifier_without_sample_weight(algorithm):
    """Fitting must fail clearly if the base estimator rejects sample_weight."""
    base_estimator = NoSampleWeightWrapper(DummyClassifier())
    clf = AdaBoostClassifier(base_estimator=base_estimator, algorithm=algorithm)
    err_msg = "{} doesn't support sample_weight".format(
        base_estimator.__class__.__name__
    )
    with pytest.raises(ValueError, match=err_msg):
        clf.fit(iris.data, iris.target)
def test_adaboostregressor_sample_weight():
    """A zero sample weight must neutralise an outlier's influence on the
    error computed for a weak learner."""
    rng = np.random.RandomState(42)
    X_lin = np.linspace(0, 100, num=1000)
    y_lin = (0.8 * X_lin + 0.2) + (rng.rand(X_lin.shape[0]) * 0.0001)
    X_lin = X_lin.reshape(-1, 1)

    # Inject an arbitrary outlier as the last sample.
    X_lin[-1] *= 10
    y_lin[-1] = 10000

    # random_state=0 ensure that the underlying bootstrap will use the outlier
    regr_no_outlier = AdaBoostRegressor(
        base_estimator=LinearRegression(), n_estimators=1, random_state=0
    )
    regr_with_weight = clone(regr_no_outlier)
    regr_with_outlier = clone(regr_no_outlier)

    # Fit three models:
    # - with the outlier
    # - without the outlier
    # - with the outlier present but zero-weighted
    regr_with_outlier.fit(X_lin, y_lin)
    regr_no_outlier.fit(X_lin[:-1], y_lin[:-1])
    sample_weight = np.ones_like(y_lin)
    sample_weight[-1] = 0
    regr_with_weight.fit(X_lin, y_lin, sample_weight=sample_weight)

    # Score on the inlier portion only.
    score_with_outlier = regr_with_outlier.score(X_lin[:-1], y_lin[:-1])
    score_no_outlier = regr_no_outlier.score(X_lin[:-1], y_lin[:-1])
    score_with_weight = regr_with_weight.score(X_lin[:-1], y_lin[:-1])

    assert score_with_outlier < score_no_outlier
    assert score_with_outlier < score_with_weight
    assert score_no_outlier == pytest.approx(score_with_weight)
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_adaboost_consistent_predict(algorithm):
    """predict() must agree with the argmax of predict_proba().

    Regression test for
    https://github.com/scikit-learn/scikit-learn/issues/14084
    """
    X_train, X_test, y_train, y_test = train_test_split(
        *datasets.load_digits(return_X_y=True), random_state=42
    )
    model = AdaBoostClassifier(algorithm=algorithm, random_state=42)
    model.fit(X_train, y_train)

    proba_argmax = np.argmax(model.predict_proba(X_test), axis=1)
    assert_array_equal(proba_argmax, model.predict(X_test))
@pytest.mark.parametrize(
    "model, X, y",
    [
        (AdaBoostClassifier(), iris.data, iris.target),
        (AdaBoostRegressor(), diabetes.data, diabetes.target),
    ],
)
def test_adaboost_negative_weight_error(model, X, y):
    """A negative sample weight must be rejected with a clear error."""
    sample_weight = np.ones_like(y)
    sample_weight[-1] = -10
    with pytest.raises(
        ValueError, match="sample_weight cannot contain negative weight"
    ):
        model.fit(X, y, sample_weight=sample_weight)
| |
# encoding: utf-8
# pylint: disable=invalid-name,unused-argument,too-many-arguments
"""
Application database related tasks for Invoke.
Forked from flask-migrate
"""
import argparse
import logging
import os
from ._utils import app_context_task
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# Alembic is an optional dependency: when it is missing we log a warning and
# skip defining the migration tasks instead of crashing at import time.
try:
    from alembic import __version__ as __alembic_version__
    from alembic.config import Config as AlembicConfig
    from alembic import command
except ImportError:
    log.warning("Alembic cannot be imported, so some app.db.* tasks won't be available!")
else:
    # Parse "X.Y.Z[...]" into an (X, Y, Z) int tuple so the tasks below can
    # gate newer Alembic command signatures on the installed version.
    alembic_version = tuple([int(v) for v in __alembic_version__.split('.')[0:3]])
class Config(AlembicConfig):
    """
    Custom config that overwrites template directory.
    """

    def get_template_directory(self):
        # Serve Alembic templates from our bundled `db_templates` folder.
        here = os.path.dirname(os.path.abspath(__file__))
        return os.path.join(here, 'db_templates')
def _get_config(directory, x_arg=None, opts=None):
    """
    A helper that prepares an AlembicConfig instance.

    `opts` entries become truthy attributes on `cmd_opts`; `x_arg` is
    appended to the `-x` argument list.
    """
    config = Config(os.path.join(directory, 'alembic.ini'))
    config.set_main_option('script_location', directory)
    if config.cmd_opts is None:
        config.cmd_opts = argparse.Namespace()
    for opt in (opts or []):
        setattr(config.cmd_opts, opt, True)
    if x_arg is not None:
        existing = getattr(config.cmd_opts, 'x', None)
        if existing:
            existing.append(x_arg)
        else:
            setattr(config.cmd_opts, 'x', [x_arg])
    return config
@app_context_task(
    help={
        'directory': "migration script directory",
        # Fix: help text typo "migraton" -> "migration".
        'multidb': "Multiple databases migration",
    }
)
def init(context, directory='migrations', multidb=False):
    """Initialize a new migration script directory."""
    config = Config()
    config.set_main_option('script_location', directory)
    config.config_file_name = os.path.join(directory, 'alembic.ini')
    # Pick the template matching a single- or multi-database layout.
    if multidb:
        command.init(config, directory, 'flask-multidb')
    else:
        command.init(config, directory, 'flask')
@app_context_task(
    help={
        'rev_id': "Specify a hardcoded revision id instead of generating one",
        'version_path': "Specify specific path from config for version file",
        'branch_label': "Specify a branch label to apply to the new revision",
        'splice': "Allow a non-head revision as the 'head' to splice onto",
        'head': "Specify head revision or <branchname>@head to base new revision on",
        'sql': "Don't emit SQL to database - dump to standard output instead",
        # Fix: help text typos "andidate"/"operatons" -> "candidate"/"operations".
        'autogenerate': "Populate revision script with candidate migration operations, " \
                        "based on comparison of database to model",
        'directory': "migration script directory",
    }
)
def revision(context, directory='migrations', message=None, autogenerate=False, sql=False,
             head='head', splice=False, branch_label=None, version_path=None, rev_id=None):
    """Create a new revision file."""
    config = _get_config(directory)
    if alembic_version >= (0, 7, 0):
        # Branching-related keyword arguments exist only in Alembic >= 0.7.
        command.revision(config, message, autogenerate=autogenerate, sql=sql,
                         head=head, splice=splice, branch_label=branch_label,
                         version_path=version_path, rev_id=rev_id)
    else:
        command.revision(config, message, autogenerate=autogenerate, sql=sql)
@app_context_task(
    help={
        'rev_id': "Specify a hardcoded revision id instead of generating one",
        'version_path': "Specify specific path from config for version file",
        'branch_label': "Specify a branch label to apply to the new revision",
        'splice': "Allow a non-head revision as the 'head' to splice onto",
        'head': "Specify head revision or <branchname>@head to base new revision on",
        'sql': "Don't emit SQL to database - dump to standard output instead",
        'directory': "migration script directory",
    }
)
def migrate(context, directory='migrations', message=None, sql=False, head='head', splice=False,
            branch_label=None, version_path=None, rev_id=None):
    """Alias for 'revision --autogenerate'"""
    config = _get_config(directory, opts=['autogenerate'])
    # Branching-related keyword arguments exist only in Alembic >= 0.7.
    extra = {}
    if alembic_version >= (0, 7, 0):
        extra = dict(head=head, splice=splice, branch_label=branch_label,
                     version_path=version_path, rev_id=rev_id)
    command.revision(config, message, autogenerate=True, sql=sql, **extra)
@app_context_task(
    help={
        'revision': "revision identifier",
        'directory': "migration script directory",
    }
)
def edit(context, revision='current', directory='migrations'):
    """Edit the given revision file (requires Alembic >= 0.8.0)."""
    if alembic_version < (0, 8, 0):
        raise RuntimeError('Alembic 0.8.0 or greater is required')
    command.edit(_get_config(directory), revision)
@app_context_task(
    help={
        'rev_id': "Specify a hardcoded revision id instead of generating one",
        'branch_label': "Specify a branch label to apply to the new revision",
        # Fix: this description was attached to 'message' although it
        # describes the 'revisions' parameter.
        'revisions': "one or more revisions, or 'heads' for all heads",
        'message': "Message string to use with the merge revision",
        'directory': "migration script directory",
    }
)
def merge(context, directory='migrations', revisions='', message=None, branch_label=None,
          rev_id=None):
    """Merge two revisions together. Creates a new migration file"""
    if alembic_version >= (0, 7, 0):
        config = _get_config(directory)
        command.merge(config, revisions, message=message,
                      branch_label=branch_label, rev_id=rev_id)
    else:
        raise RuntimeError('Alembic 0.7.0 or greater is required')
@app_context_task(
    help={
        'tag': "Arbitrary 'tag' name - can be used by custom env.py scripts",
        'sql': "Don't emit SQL to database - dump to standard output instead",
        'revision': "revision identifier",
        'directory': "migration script directory",
        'x_arg': "Additional arguments consumed by custom env.py scripts",
    }
)
def upgrade(context, directory='migrations', revision='head', sql=False, tag=None, x_arg=None,
            app=None):
    """Upgrade to a later version"""
    # NOTE: `app` is accepted but unused; kept for task-runner compatibility.
    config = _get_config(directory, x_arg=x_arg)
    command.upgrade(config, revision, sql=sql, tag=tag)
@app_context_task(
    help={
        'tag': "Arbitrary 'tag' name - can be used by custom env.py scripts",
        'sql': "Don't emit SQL to database - dump to standard output instead",
        'revision': "revision identifier",
        'directory': "migration script directory",
        'x_arg': "Additional arguments consumed by custom env.py scripts",
    }
)
def downgrade(context, directory='migrations', revision='-1', sql=False, tag=None, x_arg=None):
    """Revert to a previous version"""
    config = _get_config(directory, x_arg=x_arg)
    # Offline SQL dumps need an explicit range instead of a relative "-1".
    target = 'head:-1' if sql and revision == '-1' else revision
    command.downgrade(config, target, sql=sql, tag=tag)
@app_context_task(
    help={
        'revision': "revision identifier",
        'directory': "migration script directory",
    }
)
def show(context, directory='migrations', revision='head'):
    """Show the revision denoted by the given symbol."""
    if alembic_version < (0, 7, 0):
        raise RuntimeError('Alembic 0.7.0 or greater is required')
    command.show(_get_config(directory), revision)
@app_context_task(
    help={
        'verbose': "Use more verbose output",
        'rev_range': "Specify a revision range; format is [start]:[end]",
        'directory': "migration script directory",
    }
)
def history(context, directory='migrations', rev_range=None, verbose=False):
    """List changeset scripts in chronological order."""
    cfg = _get_config(directory)
    # `verbose` is only understood by Alembic >= 0.7.
    if alembic_version < (0, 7, 0):
        command.history(cfg, rev_range)
    else:
        command.history(cfg, rev_range, verbose=verbose)
@app_context_task(
    help={
        'resolve_dependencies': "Treat dependency versions as down revisions",
        'verbose': "Use more verbose output",
        'directory': "migration script directory",
    }
)
def heads(context, directory='migrations', verbose=False, resolve_dependencies=False):
    """Show current available heads in the script directory"""
    if alembic_version < (0, 7, 0):
        raise RuntimeError('Alembic 0.7.0 or greater is required')
    command.heads(_get_config(directory), verbose=verbose,
                  resolve_dependencies=resolve_dependencies)
@app_context_task(
    help={
        'verbose': "Use more verbose output",
        'directory': "migration script directory",
    }
)
def branches(context, directory='migrations', verbose=False):
    """Show current branch points"""
    cfg = _get_config(directory)
    # `verbose` is only understood by Alembic >= 0.7.
    if alembic_version < (0, 7, 0):
        command.branches(cfg)
    else:
        command.branches(cfg, verbose=verbose)
@app_context_task(
    help={
        'head_only': "Deprecated. Use --verbose for additional output",
        'verbose': "Use more verbose output",
        'directory': "migration script directory",
    }
)
def current(context, directory='migrations', verbose=False, head_only=False):
    """Display the current revision for each database."""
    cfg = _get_config(directory)
    # `verbose`/`head_only` are only understood by Alembic >= 0.7.
    if alembic_version < (0, 7, 0):
        command.current(cfg)
    else:
        command.current(cfg, verbose=verbose, head_only=head_only)
@app_context_task(
    help={
        'tag': "Arbitrary 'tag' name - can be used by custom env.py scripts",
        'sql': "Don't emit SQL to database - dump to standard output instead",
        'revision': "revision identifier",
        'directory': "migration script directory",
    }
)
def stamp(context, directory='migrations', revision='head', sql=False, tag=None):
    """'stamp' the revision table with the given revision; don't run any
    migrations"""
    cfg = _get_config(directory)
    command.stamp(cfg, revision, sql=sql, tag=tag)
@app_context_task
def init_development_data(context, upgrade_db=True, skip_on_failure=False):
    """
    Fill a database with development data like default users.
    """
    if upgrade_db:
        # Bring the schema up to date before seeding.
        context.invoke_execute(context, 'app.db.upgrade')

    log.info("Initializing development data...")

    from migrations import initial_development_data

    try:
        initial_development_data.init()
    except AssertionError as exception:
        if skip_on_failure:
            log.debug(
                "The following error was ignored due to the `skip_on_failure` flag: %s",
                exception
            )
            log.info("Initializing development data step is skipped.")
        else:
            log.error("%s", exception)
    else:
        log.info("Fixtures have been successfully applied.")
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing reader datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.contrib.data.python.ops import readers
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class FixedLengthRecordDatasetTestBase(test.TestCase):
  """Base class for setting up and testing FixedLengthRecordDataset."""

  def setUp(self):
    super(FixedLengthRecordDatasetTestBase, self).setUp()
    # Layout parameters of the generated fixed-length record files.
    self._num_files = 2
    self._num_records = 7
    self._header_bytes = 5
    self._record_bytes = 3
    self._footer_bytes = 2

  def _record(self, f, r):
    """Return the body of record `r` in file `f`."""
    digit = str(f * 2 + r)
    return compat.as_bytes(digit * self._record_bytes)

  def _createFiles(self):
    """Write `_num_files` header/records/footer files; return their paths."""
    filenames = []
    for file_index in range(self._num_files):
      path = os.path.join(self.get_temp_dir(),
                          "fixed_length_record.%d.txt" % file_index)
      filenames.append(path)
      with open(path, "wb") as out:
        out.write(b"H" * self._header_bytes)
        for record_index in range(self._num_records):
          out.write(self._record(file_index, record_index))
        out.write(b"F" * self._footer_bytes)
    return filenames
class ReadBatchFeaturesTestBase(test.TestCase):
"""Base class for setting up and testing `make_batched_feature_dataset`."""
def setUp(self):
  super(ReadBatchFeaturesTestBase, self).setUp()
  # Two TFRecord files of seven records each, written up-front for the tests.
  self._num_files = 2
  self._num_records = 7
  self.test_filenames = self._createFiles()
def make_batch_feature(self,
                       filenames,
                       num_epochs,
                       batch_size,
                       label_key=None,
                       reader_num_threads=1,
                       parser_num_threads=1,
                       shuffle=False,
                       shuffle_seed=None,
                       drop_final_batch=False):
  """Build a `make_batched_features_dataset` over `filenames`."""
  # Remember the batching parameters so helper assertions can consult them.
  self.filenames = filenames
  self.num_epochs = num_epochs
  self.batch_size = batch_size

  feature_spec = {
      "file": parsing_ops.FixedLenFeature([], dtypes.int64),
      "record": parsing_ops.FixedLenFeature([], dtypes.int64),
      "keywords": parsing_ops.VarLenFeature(dtypes.string),
      "label": parsing_ops.FixedLenFeature([], dtypes.string),
  }
  return readers.make_batched_features_dataset(
      file_pattern=self.filenames,
      batch_size=self.batch_size,
      features=feature_spec,
      label_key=label_key,
      reader=core_readers.TFRecordDataset,
      num_epochs=self.num_epochs,
      shuffle=shuffle,
      shuffle_seed=shuffle_seed,
      reader_num_threads=reader_num_threads,
      parser_num_threads=parser_num_threads,
      drop_final_batch=drop_final_batch)
def _record(self, f, r, l):
  """Serialize one Example for record `r` of file `f` with label `l`."""

  def _int64(value):
    return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[value]))

  def _bytes(values):
    return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=values))

  example = example_pb2.Example(
      features=feature_pb2.Features(
          feature={
              "file": _int64(f),
              "record": _int64(r),
              "keywords": _bytes(self._get_keywords(f, r)),
              "label": _bytes([compat.as_bytes(l)]),
          }))
  return example.SerializeToString()
def _get_keywords(self, f, r):
  """Return one or two keyword byte strings depending on (f + r) parity."""
  num_keywords = 1 + (f + r) % 2
  return [
      compat.as_bytes("keyword%d" % index) for index in range(num_keywords)
  ]
def _sum_keywords(self, num_files):
  """Total keyword count over the first `num_files` files (see _get_keywords)."""
  return sum(1 + (i + j) % 2
             for i in range(num_files)
             for j in range(self._num_records))
def _createFiles(self):
  """Write `_num_files` TFRecord files of fake-labelled records."""
  filenames = []
  for file_index in range(self._num_files):
    path = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % file_index)
    filenames.append(path)
    writer = python_io.TFRecordWriter(path)
    for record_index in range(self._num_records):
      writer.write(self._record(file_index, record_index, "fake-label"))
    writer.close()
  return filenames
def _run_actual_batch(self, outputs, sess, label_key_provided=False):
  """Evaluate one batch and return its component tensors in a fixed order."""
  if label_key_provided:
    # `outputs` is a (feature dict, label) tuple.
    features_op = outputs[0]
    label_op = outputs[1]
  else:
    features_op = outputs
    label_op = features_op["label"]
  keywords_op = features_op["keywords"]
  return sess.run([
      features_op["file"], keywords_op.indices, keywords_op.values,
      keywords_op.dense_shape, features_op["record"], label_op
  ])
def _next_actual_batch(self, sess, label_key_provided=False):
return self._run_actual_batch(self.outputs, sess, label_key_provided)
def _interleave(self, iterators, cycle_length):
pending_iterators = iterators
open_iterators = []
num_open = 0
for i in range(cycle_length):
if pending_iterators:
open_iterators.append(pending_iterators.pop(0))
num_open += 1
while num_open:
for i in range(min(cycle_length, len(open_iterators))):
if open_iterators[i] is None:
continue
try:
yield next(open_iterators[i])
except StopIteration:
if pending_iterators:
open_iterators[i] = pending_iterators.pop(0)
else:
open_iterators[i] = None
num_open -= 1
  def _next_expected_batch(self,
                           file_indices,
                           batch_size,
                           num_epochs,
                           cycle_length=1):
    """Yield the batches the dataset under test is expected to produce.

    Each yielded batch is a 6-element list: [file ids, sparse keyword
    indices, sparse keyword values, keyword dense shape, record ids,
    labels].  A trailing partial batch is yielded last if the record
    count does not divide evenly by `batch_size`.
    """
    # Yields (file, record, label) triples sequentially per file.
    def _next_record(file_indices):
      for j in file_indices:
        for i in range(self._num_records):
          yield j, i, compat.as_bytes("fake-label")
    # Same triples, but interleaved across files with the given cycle length.
    def _next_record_interleaved(file_indices, cycle_length):
      return self._interleave([_next_record([i]) for i in file_indices],
                              cycle_length)
    # Accumulators for the batch currently being assembled.
    file_batch = []
    keywords_batch_indices = []
    keywords_batch_values = []
    keywords_batch_max_len = 0
    record_batch = []
    batch_index = 0
    label_batch = []
    for _ in range(num_epochs):
      if cycle_length == 1:
        next_records = _next_record(file_indices)
      else:
        next_records = _next_record_interleaved(file_indices, cycle_length)
      for record in next_records:
        f = record[0]
        r = record[1]
        label_batch.append(record[2])
        file_batch.append(f)
        record_batch.append(r)
        keywords = self._get_keywords(f, r)
        keywords_batch_values.extend(keywords)
        # Sparse indices: one [row, column] pair per keyword in this row.
        keywords_batch_indices.extend(
            [[batch_index, i] for i in range(len(keywords))])
        batch_index += 1
        keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
        if len(file_batch) == batch_size:
          yield [
              file_batch, keywords_batch_indices, keywords_batch_values,
              [batch_size, keywords_batch_max_len], record_batch, label_batch
          ]
          # Reset accumulators for the next batch.
          file_batch = []
          keywords_batch_indices = []
          keywords_batch_values = []
          keywords_batch_max_len = 0
          record_batch = []
          batch_index = 0
          label_batch = []
    # Flush the final partial batch, if any.
    if file_batch:
      yield [
          file_batch, keywords_batch_indices, keywords_batch_values,
          [len(file_batch), keywords_batch_max_len], record_batch, label_batch
      ]
def verify_records(self,
sess,
batch_size,
file_index=None,
num_epochs=1,
label_key_provided=False,
interleave_cycle_length=1):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(
file_indices,
batch_size,
num_epochs,
cycle_length=interleave_cycle_length):
actual_batch = self._next_actual_batch(
sess, label_key_provided=label_key_provided)
for i in range(len(expected_batch)):
self.assertAllEqual(expected_batch[i], actual_batch[i])
class TextLineDatasetTestBase(test.TestCase):
  """Base class for setting up and testing TextLineDataset."""

  def _lineText(self, f, l):
    """Return the bytes content of line `l` of file `f`."""
    return compat.as_bytes("%d: %d" % (f, l))

  def _createFiles(self,
                   num_files,
                   num_lines,
                   crlf=False,
                   compression_type=None):
    """Write `num_files` text fixture files of `num_lines` lines each.

    Files are plain, GZIP- or ZLIB-compressed per `compression_type`.
    Line endings are CRLF when `crlf` is set, LF otherwise.  Returns the
    list of file paths.
    """
    newline = b"\r\n" if crlf else b"\n"
    filenames = []
    for i in range(num_files):
      fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
      filenames.append(fn)
      pieces = []
      for j in range(num_lines):
        pieces.append(self._lineText(i, j))
        # Every line ends with a newline except the last line of every
        # file other than file 0 — exercises the missing-trailing-newline
        # edge case.
        if j + 1 != num_lines or i == 0:
          pieces.append(newline)
      contents = b"".join(pieces)
      if not compression_type:
        with open(fn, "wb") as f:
          f.write(contents)
      elif compression_type == "GZIP":
        with gzip.GzipFile(fn, "wb") as f:
          f.write(contents)
      elif compression_type == "ZLIB":
        with open(fn, "wb") as f:
          f.write(zlib.compress(contents))
      else:
        raise ValueError("Unsupported compression_type", compression_type)
    return filenames
class TFRecordDatasetTestBase(test.TestCase):
  """Base class for setting up and testing TFRecordDataset."""

  def setUp(self):
    """Create fixture files and build the shared dataset graph plumbing."""
    super(TFRecordDatasetTestBase, self).setUp()
    self._num_files = 2
    self._num_records = 7
    self.test_filenames = self._createFiles()
    # Graph inputs fed by the individual tests.
    self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
    self.num_epochs = array_ops.placeholder_with_default(
        constant_op.constant(1, dtypes.int64), shape=[])
    self.compression_type = array_ops.placeholder_with_default("", shape=[])
    self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])
    repeat_dataset = core_readers.TFRecordDataset(
        self.filenames, self.compression_type).repeat(self.num_epochs)
    batch_dataset = repeat_dataset.batch(self.batch_size)
    # One reinitializable iterator serves both the unbatched and the
    # batched pipelines.
    iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
    self.init_op = iterator.make_initializer(repeat_dataset)
    self.init_batch_op = iterator.make_initializer(batch_dataset)
    self.get_next = iterator.get_next()

  def _record(self, f, r):
    """Return the payload bytes stored for record `r` of file `f`."""
    return compat.as_bytes("Record %d of file %d" % (r, f))

  def _createFiles(self):
    """Write the TFRecord fixture files and return their paths."""
    filenames = []
    for file_index in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % file_index)
      filenames.append(fn)
      writer = python_io.TFRecordWriter(fn)
      for record_index in range(self._num_records):
        writer.write(self._record(file_index, record_index))
      writer.close()
    return filenames
| |
from django.shortcuts import render
from django.http import HttpResponsePermanentRedirect, HttpResponseNotFound
from django.core.urlresolvers import reverse
from django.db import transaction, connection, connections
from datetime import date, timedelta
import re
from collections import namedtuple
from nyc.models import NYCBill
from councilmatic_core.models import Event, Organization, Bill
from councilmatic_core.views import *
from haystack.query import SearchQuerySet
class NYCIndexView(IndexView):
    # NYC-specific landing page: custom template plus the NYCBill proxy model.
    template_name = 'nyc/index.html'
    bill_model = NYCBill
class NYCAboutView(AboutView):
    # About page with an NYC-specific template; behavior comes from AboutView.
    template_name = 'nyc/about.html'
class NYCBillDetailView(BillDetailView):
    """Bill detail page that tolerates legacy or truncated slugs.

    Lookup order: exact slug; slug prefix; slug with its final
    '-'-separated token dropped.  Successful fallbacks issue a permanent
    redirect to the bill's canonical URL; exhaustion yields a 404.
    """
    model = NYCBill

    def dispatch(self, request, *args, **kwargs):
        slug = self.kwargs['slug']
        try:
            bill = self.model.objects.get(slug=slug)
            response = super().dispatch(request, *args, **kwargs)
        except NYCBill.DoesNotExist:
            bill = None
        if bill is None:
            try:
                bill = self.model.objects.get(slug__startswith=slug)
                response = HttpResponsePermanentRedirect(reverse('bill_detail', args=[bill.slug]))
            except NYCBill.DoesNotExist:
                try:
                    # Legacy slugs carry an extra trailing token; retry with
                    # the last '-'-separated piece removed.
                    one, two, three, four = slug.split('-')
                    short_slug = slug.replace('-' + four, '')
                    bill = self.model.objects.get(slug__startswith=short_slug)
                    response = HttpResponsePermanentRedirect(reverse('bill_detail', args=[bill.slug]))
                except (ValueError, NYCBill.DoesNotExist, NYCBill.MultipleObjectsReturned):
                    # ValueError: slug does not split into exactly four tokens;
                    # the ORM errors: no (or no unique) match.  Previously a
                    # bare `except:` swallowed every exception here.
                    response = HttpResponseNotFound()
        return response
class NYCCommitteeDetailView(CommitteeDetailView):
    """Committee detail page with a fallback for punctuated slugs."""
    model = Organization

    def dispatch(self, request, *args, **kwargs):
        slug = self.kwargs['slug']
        try:
            committee = self.model.objects.get(slug=slug)
            return super().dispatch(request, *args, **kwargs)
        except Organization.DoesNotExist:
            pass
        # Retry with commas and apostrophes stripped, redirecting
        # permanently to the canonical URL on a prefix match.
        simplified = slug.replace(',', '').replace('\'', '')
        try:
            committee = self.model.objects.get(slug__startswith=simplified)
            return HttpResponsePermanentRedirect(
                reverse('committee_detail', args=[committee.slug]))
        except Organization.DoesNotExist:
            return HttpResponseNotFound()
class NYCPersonDetailView(PersonDetailView):
    """Person detail page with fallbacks for legacy/punctuated slugs.

    Lookup order: exact slug; punctuation-stripped slug prefix; on an
    ambiguous prefix, a case-insensitive name lookup reconstructed from
    the slug.  Fallback hits redirect permanently to the canonical URL.
    """
    model = Person

    def dispatch(self, request, *args, **kwargs):
        slug = self.kwargs['slug']
        try:
            person = self.model.objects.get(slug=slug)
            response = super().dispatch(request, *args, **kwargs)
        except Person.DoesNotExist:
            person = None
        if person is None:
            # (Removed a dead `person_name = slug.replace('-', ' ')` that was
            # unconditionally overwritten before any use.)
            try:
                slug = slug.replace(',', '').replace('\'', '').replace('--', '-')
                person = self.model.objects.get(slug__startswith=slug)
                response = HttpResponsePermanentRedirect(reverse('person', args=[person.slug]))
            except Person.MultipleObjectsReturned:
                person_name = slug.replace('-', ' ').replace('.', '')
                # If duplicate person has middle initial, restore the '.'
                # after it before matching on the name.
                if re.match(r'\w+[\s.-]\w+[\s.-]\w+', slug) is not None:
                    person_name = re.sub(r'(\w+\s\w+)(\s\w+)', r'\1.\2', person_name)
                person = self.model.objects.get(name__iexact=person_name)
                response = HttpResponsePermanentRedirect(reverse('person', args=[person.slug]))
            except Person.DoesNotExist:
                response = HttpResponseNotFound()
        return response
class NYCBillWidgetView(BillWidgetView):
    # Embeddable bill widget backed by the NYCBill proxy model.
    model = NYCBill
class NYCCommitteesView(CommitteesView):
    """Committees listing bucketed into committees, subcommittees, and
    task forces by name prefix."""

    def get_queryset(self):
        # The page is driven entirely by get_context_data.
        return []

    def get_context_data(self, **kwargs):
        # Deliberately skips CommitteesView's own implementation.
        context = super(CommitteesView, self).get_context_data(**kwargs)
        buckets = (
            ('committees', 'Committee'),
            ('subcommittees', 'Subcommittee'),
            ('taskforces', 'Task Force'),
        )
        for context_key, name_prefix in buckets:
            matches = Organization.committees().filter(name__startswith=name_prefix)
            # Only keep bodies that currently have members.
            context[context_key] = [c for c in matches if c.memberships.all()]
        return context
class NYCEventDetailView(EventDetailView):
    """Event detail page that also loads the event's agenda-related bills."""
    template_name = 'nyc/event.html'

    def get_context_data(self, **kwargs):
        # Deliberately skips EventDetailView's own implementation.
        context = super(EventDetailView, self).get_context_data(**kwargs)
        event = context['event']
        # Fetch the bills on this event's agenda, in agenda order.
        with connection.cursor() as cursor:
            query = '''
                SELECT distinct
                    b.identifier,
                    b.slug,
                    b.description,
                    i.order
                FROM councilmatic_core_bill AS b
                INNER JOIN councilmatic_core_eventagendaitem as i
                ON i.bill_id=b.ocd_id
                WHERE i.event_id=%s
                GROUP BY
                    b.identifier,
                    b.slug,
                    b.description,
                    i.order
                ORDER BY i.order
            '''
            # Bind ocd_id as a query parameter instead of interpolating it
            # into the SQL string (avoids quoting/injection issues).
            cursor.execute(query, [event.ocd_id])
            # Get field names
            columns = [c[0] for c in cursor.description]
            # Named tuple so templates can use attribute access.
            bill_tuple = namedtuple('BillProperties', columns, rename=True)
            # Put results inside a list with assigned fields (from namedtuple)
            related_bills = [bill_tuple(*r) for r in cursor]
        context['related_bills'] = related_bills
        return context
class NYCCouncilmaticFacetedSearchView(CouncilmaticFacetedSearchView):
    """Faceted search view wiring facet selection and sorting into the form."""

    def build_form(self, form_kwargs=None):
        # NOTE(review): the result of this super() call is discarded; the
        # method rebuilds the form itself below.  Kept for any side effects —
        # confirm whether it can be removed.
        form = super(CouncilmaticFacetedSearchView, self).build_form(form_kwargs=form_kwargs)
        # For faceted search functionality.
        if form_kwargs is None:
            form_kwargs = {}
        form_kwargs['selected_facets'] = self.request.GET.getlist("selected_facets")
        # For remaining search functionality.
        data = None
        # Previously `dataDict` was only bound when GET was non-empty and a
        # bare `except:` hid the resulting NameError; bind it up front.
        data_dict = {}
        kwargs = {
            'load_all': self.load_all,
        }
        sqs = SearchQuerySet().facet('bill_type')\
                              .facet('sponsorships', sort='index')\
                              .facet('controlling_body')\
                              .facet('inferred_status')\
                              .highlight()
        if form_kwargs:
            kwargs.update(form_kwargs)
        if len(self.request.GET):
            data = self.request.GET
            data_dict = dict(data)
        if self.searchqueryset is not None:
            kwargs['searchqueryset'] = sqs
        try:
            sort_fields = data_dict['sort_by']
        except KeyError:
            # No explicit sort requested: default to newest action first.
            kwargs['searchqueryset'] = sqs.order_by('-last_action_date')
        else:
            for el in sort_fields:
                # 'el' may include a '?' from the URL, so use substring tests.
                if 'date' in el:
                    if 'ascending' in data_dict:
                        kwargs['searchqueryset'] = sqs.order_by('last_action_date')
                    else:
                        kwargs['searchqueryset'] = sqs.order_by('-last_action_date')
                if 'title' in el:
                    if 'descending' in data_dict:
                        kwargs['searchqueryset'] = sqs.order_by('-sort_name')
                    else:
                        kwargs['searchqueryset'] = sqs.order_by('sort_name')
                if 'relevance' in el:
                    kwargs['searchqueryset'] = sqs
        return self.form_class(data, **kwargs)
| |
"""Unit tests for qrscp.py verification service."""
import logging
import os
from pathlib import Path
import subprocess
import sys
import tempfile
import time
import pytest
try:
import sqlalchemy
HAVE_SQLALCHEMY = True
except ImportError:
HAVE_SQLALCHEMY = False
from pydicom import dcmread
from pydicom.uid import (
ExplicitVRLittleEndian,
ImplicitVRLittleEndian,
DeflatedExplicitVRLittleEndian,
ExplicitVRBigEndian,
)
from pynetdicom import AE, evt, debug_logger, DEFAULT_TRANSFER_SYNTAXES
from pynetdicom.sop_class import Verification, CTImageStorage
# debug_logger()
# Path to the qrscp application under test: ../qrscp/qrscp.py relative to
# this test file's directory.
APP_DIR = Path(__file__).parent.parent
APP_FILE = APP_DIR / "qrscp" / "qrscp.py"
def start_qrscp(args):
    """Launch qrscp.py as a script subprocess and return the Popen handle."""
    command = [sys.executable, os.fspath(APP_FILE)]
    command.extend(args)
    return subprocess.Popen(command)
def start_qrscp_cli(args):
    """Launch the qrscp app via `python -m pynetdicom qrscp` and return the
    Popen handle."""
    command = [sys.executable, "-m", "pynetdicom", "qrscp"]
    command.extend(args)
    return subprocess.Popen(command)
class EchoSCPBase:
    """Tests for echoscp.py"""
    # Subclasses set `self.func` to the process starter (script vs CLI).
    # `setup`/`teardown` are the nose-style hook names pytest still honors.
    def setup(self):
        """Run prior to each test"""
        self.ae = None
        self.p = None
        self.func = None
        # Temp file backing the app's database — presumably SQLite; confirm
        # against qrscp.py.
        self.tfile = tempfile.NamedTemporaryFile()
        self.db_location = self.tfile.name
        self.instance_location = tempfile.TemporaryDirectory()
        # Seconds to wait for the subprocess to start listening.
        self.startup = 1.0
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
        if self.p:
            self.p.kill()
            self.p.wait(timeout=5)
    def test_default(self):
        """Test default settings."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context(Verification)
        self.p = p = self.func(
            [
                "--database-location",
                self.db_location,
                "--instance-location",
                self.instance_location.name,
            ]
        )
        time.sleep(self.startup)
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assoc.release()
        p.terminate()
        p.wait()
        # Terminated via signal, so the return code is non-zero.
        assert p.returncode != 0
        # 16382 appears to be the app's configured max PDU size — TODO
        # confirm against qrscp.py defaults.
        assert 16382 == assoc.acceptor.maximum_length
        cxs = assoc.accepted_contexts
        assert len(cxs) == 1
        cxs = {cx.abstract_syntax: cx for cx in cxs}
        assert Verification in cxs
    def test_flag_version(self, capfd):
        """Test --version flag."""
        self.p = p = self.func(
            [
                "--database-location",
                self.db_location,
                "--instance-location",
                self.instance_location.name,
                "--version",
            ]
        )
        # --version prints and exits immediately; no startup wait needed.
        p.wait()
        assert p.returncode == 0
        out, err = capfd.readouterr()
        assert "qrscp.py v" in out
    def test_flag_quiet(self, capfd):
        """Test --quiet flag."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context(Verification)
        self.p = p = self.func(
            [
                "--database-location",
                self.db_location,
                "--instance-location",
                self.instance_location.name,
                "-q",
            ]
        )
        time.sleep(self.startup)
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        status = assoc.send_c_echo()
        assert status.Status == 0x0000
        assoc.release()
        p.terminate()
        p.wait()
        # Quiet mode: the app must emit nothing on stdout or stderr.
        out, err = capfd.readouterr()
        assert out == err == ""
    def test_flag_verbose(self, capfd):
        """Test --verbose flag."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context(Verification)
        out, err = [], []
        self.p = p = self.func(
            [
                "--database-location",
                self.db_location,
                "--instance-location",
                self.instance_location.name,
                "-v",
            ]
        )
        time.sleep(self.startup)
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        status = assoc.send_c_echo()
        assert status.Status == 0x0000
        assoc.release()
        p.terminate()
        p.wait()
        # INFO-level log lines are expected on stderr.
        out, err = capfd.readouterr()
        assert "Accepting Association" in err
        assert "Received Echo Request" in err
        assert "Association Released" in err
    def test_flag_debug(self, capfd):
        """Test --debug flag."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context(Verification)
        self.p = p = self.func(
            [
                "--database-location",
                self.db_location,
                "--instance-location",
                self.instance_location.name,
                "-d",
            ]
        )
        time.sleep(self.startup)
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        status = assoc.send_c_echo()
        assert status.Status == 0x0000
        assoc.release()
        p.terminate()
        p.wait()
        # DEBUG-level log lines are expected on stderr.
        out, err = capfd.readouterr()
        assert "pydicom.read_dataset()" in err
        assert "Accept Parameters" in err
        assert "Received C-ECHO request from" in err
    def test_flag_log_collision(self):
        """Test error with -q -v and -d flag."""
        self.p = p = self.func(
            [
                "--database-location",
                self.db_location,
                "--instance-location",
                self.instance_location.name,
                "-v",
                "-d",
            ]
        )
        # Conflicting logging flags must make the app exit with an error.
        p.wait()
        assert p.returncode != 0
@pytest.mark.skipif(not HAVE_SQLALCHEMY, reason="Requires sqlalchemy")
class TestEchoSCP(EchoSCPBase):
    """Tests for echoscp.py"""
    def setup(self):
        """Run prior to each test"""
        super().setup()
        # Launch the app directly as a script (python path/to/qrscp.py).
        self.func = start_qrscp
@pytest.mark.skipif(not HAVE_SQLALCHEMY, reason="Requires sqlalchemy")
class TestEchoSCPCLI(EchoSCPBase):
    """Tests for echoscp using CLI"""
    def setup(self):
        """Run prior to each test"""
        super().setup()
        # Launch the app through the pynetdicom CLI entry point.
        self.func = start_qrscp_cli
| |
### This program intends to combine all resources under a certain path and generate NodeRefListMul.csv;
### Author: Ye Gao
### Date: 2017-11-7
import csv
import os
import re
import scrapy
class QuotesSpider(scrapy.Spider):
    """Crawl locally saved reference-list pages ("<node path>rl.html") and
    write each page's parsed references back into NodeRefList.csv.

    NOTE(review): all class-level code below runs at import time and depends
    on RootPath.dat and the on-disk node tree being present.
    """
    file = open('RootPath.dat', 'r')
    root = (file.read()).replace("\n", "") # read root path from RootPath.dat;
    file.close()
    # This code intends to generate reference link list(RefLinkList);
    # Breadth-first walk over the numbered child directories (1, 2, ...);
    # any directory with at least one numbered child is a reference node.
    RefPathList = []
    NodePathLayer = [root]
    while(len(NodePathLayer) > 0):
        NodePathListNew = []
        for element in NodePathLayer:
            counter = 0
            while str(counter+1) in os.listdir(element):
                ChildPath = element + "/" + str(counter+1)
                NodePathListNew.append(ChildPath)
                counter += 1
            if counter != 0:
                RefPathList.append(element)
        NodePathLayer = NodePathListNew
    # Each reference node has a saved "<path>rl.html" page to crawl.
    RefLinkList = []
    for ElementPath in RefPathList:
        GSHF = "file://" + ElementPath + "rl.html"
        RefLinkList.append(GSHF)
    # declare class variables;
    name = "ListReference"
    start_urls = RefLinkList
    def parse(self, response):
        """Extract the reference list from one saved page and rewrite
        NodeRefList.csv with the references filled into column 0.

        The extraction strategy is chosen per publisher tag (column 6 of
        NodeRefList.csv); commas/semicolons inside references are replaced
        with '|' so the CSV stays one field per reference.
        """
        # read node list from csv file;
        file = open('NodeRefList.csv', 'rb')
        reader = csv.reader(file)
        NodeList = list(reader)
        file.close()
        FirstRow = NodeList.pop(0)
        # Column 1 of each row holds the node's filesystem path.
        NodePathList = []
        for element in NodeList:
            NodePathList.append(element[1])
        # Column 6 holds the journal/publisher tag for each reference node.
        RefJournalList = []
        for element in QuotesSpider.RefPathList:
            index = NodePathList.index(element)
            RefJournalList.append(NodeList[index][6])
        NodeLinkList = []
        for element in NodeList:
            NodeLinkList.append("file://" + element[1] + ".html")
        ReferenceList = []
        ReferenceAll = []
        # index = RefLinkList.index(response.url)
        index = (QuotesSpider.start_urls).index(response.url)
        # Row of the first child record ("<path>/1") for this page.
        NodeIndex = NodeLinkList.index((response.url).replace("rl.html", "/1.html"))
        # if journal is ACM digital library;
        if (RefJournalList[index] == "ACMDL") or (RefJournalList[index] == "RGate"):
            ReferencePiece = []
            for reference in response.xpath('//table[@border="0"]//tr[@valign="top"]/td/div//text()').extract():
                reference = (reference.strip()).replace("\n","")
                if reference != "" and reference != "]" and reference != "[doi>" and reference[0:3] !="10.":
                    ReferencePiece.append(reference)
            # Count the sequential numeric markers (1, 2, 3, ...) to find
            # the number of the final reference entry.
            CounterReference = 0
            for element in ReferencePiece:
                if element.isdigit():
                    if int(element) == CounterReference + 1:
                        CounterReference += 1
            # Walk the pieces backwards, joining fragments until the
            # matching entry-number marker is reached.
            ReferenceCombine = []
            counter = len(ReferencePiece) - 1
            ReferenceEntry = ""
            for element in ReferencePiece:
                if ReferencePiece[counter].isdigit() == False:
                    ReferenceEntry = str(ReferencePiece[counter].encode('ascii', 'ignore')) + ReferenceEntry
                else:
                    if int(ReferencePiece[counter]) != CounterReference:
                        ReferenceEntry = str(ReferencePiece[counter].encode('ascii', 'ignore')) + ReferenceEntry
                    else:
                        ReferenceCombine.append((ReferenceEntry.replace(",","|")).replace(";","|"))
                        ReferenceEntry = ""
                        CounterReference = CounterReference - 1
                counter = counter - 1
            ReferenceList = ReferenceCombine[::-1]
        # if journal is IEEE;
        elif RefJournalList[index] == "IEEE":
            with open((response.url).replace("file://", ""), 'r') as myfile:
                data=myfile.read()
            temp_1 = re.split(r"<body>", data)# extract content from body;
            temp_2 = re.split(r"</body>", temp_1[1])
            body = temp_2[0]
            notag = ((body.replace(""","")).replace("<em>","")).replace("</em>","") # remove tags;
            symbol = (notag.replace("&", "&")).replace("'", "'") # replace utf-8 symbols;
            extract = ((((symbol.replace("\n","")).replace("\r","")).replace("\t","")).replace(",","|")).replace(";","|")
            ReferenceList = re.split(r"</br></br>", extract)
            ReferenceList.pop() # delete last empty element;
        # if journal is Wiley;
        elif RefJournalList[index] == "Wiley":
            ReferenceAll = ""
            for reference in response.xpath('//ul[@class="article-section__references-list"]/li/cite').extract():
                reference = ((reference.strip()).replace("\n","")).encode('ascii', 'ignore')
                reference = (reference.replace(",", "|")).replace(";", "")
                # Strip the known markup spans one by one, keeping the text.
                step_1 = re.sub("</span>", "", reference)
                step_2 = re.sub('<span class="pageLast">', '', step_1)
                step_3 = re.sub('<span class="pageFirst">', '', step_2)
                step_4 = re.sub('<span class="pubYear">', '', step_3)
                step_5 = re.sub('<span class="author">', '', step_4)
                step_6 = re.sub('<span class="vol">', '', step_5)
                step_7 = re.sub('<span class="citedIssue">', '', step_6)
                step_8 = re.sub('<span class="articleTitle">', '', step_7)
                step_9 = re.sub('<span class="journalTitle">', '', step_8)
                step_10 = re.sub('<cite id="cit..">', '', step_9)
                step_11 = re.sub('<cite id="cit.">', '', step_10)
                step_12 = re.sub('<em>', '', step_11)
                step_13 = re.sub('</em>', '', step_12)
                step_14 = re.sub('<span class="publisherLocation">', '', step_13)
                step_15 = re.sub('<span class="otherTitle">', '', step_14)
                step_16 = re.sub('<span class="bookTitle">', '', step_15)
                step_17 = re.sub('<cite id="cgf\d\d\d\d\d-cit-00\d\d">', '', step_16)
                step_18 = re.sub('<span class="chapterTitle">', '', step_17)
                ReferenceAll = ReferenceAll + step_18
            ReferenceList = re.split(r"</cite>", ReferenceAll)
            ReferenceList.pop()
        # if journal is Elsevier;
        elif RefJournalList[index] == "Elsevier":
            ReferencePiece = []
            for reference in response.xpath('//div/dl[@class="bib-section"]//text()').extract():
                reference = (reference.strip()).replace("\n","")
                if reference != "" and reference != "]" and reference != "[doi>" and reference[0:3] !="10.":
                    ReferencePiece.append(reference)
            # Join fragments backwards; a "[n]" marker starts a new entry.
            ReferenceCombine = []
            counter = len(ReferencePiece) - 1
            temp = ""
            for element in ReferencePiece:
                if (((ReferencePiece[counter]).replace('[', '')).replace(']', '')).isdigit() == False:
                    temp = str(ReferencePiece[counter].encode('ascii', 'ignore')) + temp
                else:
                    ReferenceCombine.append((temp.replace(",","|")).replace(";","|"))
                    temp = ""
                counter = counter - 1
            ReferenceList = ReferenceCombine[::-1]
        # if journal is kuleuven;
        elif RefJournalList[index] == "kuleuven":
            with open((response.url).replace("file://", ""), 'r') as myfile:
                data=myfile.read()
            temp_1 = ((data.replace(",", "|")).replace(";", "|")).replace("\n", " ")
            ReferenceList = re.split(r"<br>", temp_1)
            ReferenceList.pop() # delete last empty element;
        # if journal is Springer;
        elif RefJournalList[index] == "Springer":
            ReferencePiece = []
            for reference in response.xpath('//div[@class="CitationNumber"]/text() | //div[@class="CitationContent"]/text()').extract():
                reference = (reference.strip()).replace("\n","")
                ReferencePiece.append(((reference.encode('ascii', 'ignore')).replace(',','|')).replace(';','|'))
            # Join fragments backwards; a "[n]" marker starts a new entry.
            ReferenceCombine = []
            counter = len(ReferencePiece) - 1
            temp = ""
            for element in ReferencePiece:
                if (ReferencePiece[counter].replace('[', '').replace(']', '')).isdigit() == False:
                    temp = str(ReferencePiece[counter].encode('ascii', 'ignore')) + temp
                else:
                    ReferenceCombine.append((temp.replace(",","|")).replace(";","|"))
                    temp = ""
                counter = counter - 1
            ReferenceList = ReferenceCombine[::-1]
        # save CiteTimeList to csv file;
        print ReferenceList
        for element in ReferenceList:
            NodeList[NodeIndex + ReferenceList.index(element)][0] = element
        NodeList = [FirstRow] + NodeList
        file = open('NodeRefList.csv','wb')
        for i in NodeList:
            for j in i:
                file.write(j)
                file.write(',')
            file.write('\n')
        file.close()
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.classpath_products import (ArtifactClasspathEntry, ClasspathEntry,
ClasspathProducts)
from pants.base.exceptions import TaskError
from pants_test.base_test import BaseTest
def resolved_example_jar_at(path, org='com.example', name='lib'):
  """Build a ResolvedJar for `path` with a matching resolver-cache path."""
  coordinate = M2Coordinate(org=org, name=name)
  cache_path = os.path.join('resolver-cache-dir', path)
  return ResolvedJar(coordinate, cache_path=cache_path, pants_path=path)
class ClasspathProductsTest(BaseTest):
def test_single_classpath_element_no_excludes(self):
a = self.make_target('a', JvmTarget)
classpath_product = ClasspathProducts()
path = self.path('jar/path')
self.add_jar_classpath_element_for_path(classpath_product, a, path)
self.assertEqual([('default', path)], classpath_product.get_for_target(a))
def test_copy(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts()
resolved_jar = self.add_jar_classpath_element_for_path(classpath_product,
a,
self._example_jar_path())
classpath_product.add_for_target(a, [('default', self.path('a/path'))])
copied = classpath_product.copy()
self.assertEqual([('default', resolved_jar.pants_path),
('default', self.path('a/path'))], classpath_product.get_for_target(a))
self.assertEqual([('default', resolved_jar.pants_path),
('default', self.path('a/path'))], copied.get_for_target(a))
self.add_excludes_for_targets(copied, b, a)
self.assertEqual([('default', resolved_jar.pants_path),
('default', self.path('a/path'))], classpath_product.get_for_target(a))
self.assertEqual([('default', self.path('a/path'))], copied.get_for_target(a))
copied.add_for_target(b, [('default', self.path('b/path'))])
self.assertEqual([('default', resolved_jar.pants_path),
('default', self.path('a/path'))], classpath_product.get_for_target(a))
self.assertEqual([('default', self.path('a/path')),
('default', self.path('b/path'))], copied.get_for_target(a))
def test_fails_if_paths_outside_buildroot(self):
a = self.make_target('a', JvmTarget)
classpath_product = ClasspathProducts()
with self.assertRaises(TaskError) as cm:
classpath_product.add_for_target(a, [('default', '/dev/null')])
self.assertEqual(
'Classpath entry /dev/null for target a:a is located outside the buildroot.',
str(cm.exception))
def test_fails_if_jar_paths_outside_buildroot(self):
a = self.make_target('a', JvmTarget)
classpath_product = ClasspathProducts()
with self.assertRaises(TaskError) as cm:
classpath_product.add_jars_for_targets([a], 'default', [(resolved_example_jar_at('/dev/null'))])
self.assertEqual(
'Classpath entry /dev/null for target a:a is located outside the buildroot.',
str(cm.exception))
def test_excluded_classpath_element(self):
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.example', 'lib')])
classpath_product = ClasspathProducts()
example_jar_path = self._example_jar_path()
self.add_jar_classpath_element_for_path(classpath_product, a, example_jar_path)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_transitive_dependencies_excluded_classpath_element(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts()
self.add_jar_classpath_element_for_path(classpath_product, a, self._example_jar_path())
self.add_excludes_for_targets(classpath_product, b, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_intransitive_dependencies_excluded_classpath_element(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts()
example_jar_path = self._example_jar_path()
classpath_product.add_for_target(a, [('default', example_jar_path)])
classpath_product.add_excludes_for_targets([a, b])
intransitive_classpath = classpath_product.get_for_target(a, transitive=False)
self.assertEqual([('default', example_jar_path)], intransitive_classpath)
def test_parent_exclude_excludes_dependency_jar(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, dependencies=[b], excludes=[Exclude('com.example', 'lib')])
classpath_product = ClasspathProducts()
example_jar_path = self._example_jar_path()
self.add_jar_classpath_element_for_path(classpath_product, b, example_jar_path)
self.add_excludes_for_targets(classpath_product, b, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_exclude_leaves_other_jars_unaffected(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts()
com_example_jar_path = self._example_jar_path()
org_example_jar_path = self.path('ivy/jars/org.example/lib/123.4.jar')
classpath_product.add_jars_for_targets([a], 'default',
[resolved_example_jar_at(com_example_jar_path),
resolved_example_jar_at(org_example_jar_path,
org='org.example')])
self.add_excludes_for_targets(classpath_product, b)
classpath = classpath_product.get_for_target(a)
self.assertEqual([('default', org_example_jar_path)], classpath)
def test_parent_excludes_ignored_for_resolving_child_target(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, dependencies=[b], excludes=[Exclude('com.example', 'lib')])
example_jar_path = self._example_jar_path()
classpath_product = ClasspathProducts()
self.add_jar_classpath_element_for_path(classpath_product, b, example_jar_path)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_target(b)
self.assertEqual([('default', example_jar_path)], classpath)
def test_excludes_used_across_targets(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.example', 'lib')])
classpath_product = ClasspathProducts()
self.add_example_jar_classpath_element_for(classpath_product, b)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_excludes_similar_org_name(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.exam')], dependencies=[b])
classpath_product = ClasspathProducts()
self.add_example_jar_classpath_element_for(classpath_product, b)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([('default', self._example_jar_path())], classpath)
def test_excludes_org_name(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.example')], dependencies=[b])
classpath_product = ClasspathProducts()
self.add_example_jar_classpath_element_for(classpath_product, b)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_jar_provided_by_transitive_target_excluded(self):
provider = self.make_target('provider', ExportableJvmLibrary,
provides=Artifact('com.example', 'lib', Repository()))
consumer = self.make_target('consumer', JvmTarget)
root = self.make_target('root', JvmTarget, dependencies=[provider, consumer])
classpath_product = ClasspathProducts()
self.add_example_jar_classpath_element_for(classpath_product, consumer)
self.add_excludes_for_targets(classpath_product, consumer, provider, root)
classpath = classpath_product.get_for_target(root)
self.assertEqual([], classpath)
def test_jar_provided_exclude_with_similar_name(self):
# note exclude 'jars/com.example/l' should not match jars/com.example/lib/jars/123.4.jar
provider = self.make_target('provider', ExportableJvmLibrary,
provides=Artifact('com.example', 'li', Repository()))
root = self.make_target('root', JvmTarget, dependencies=[provider])
classpath_product = ClasspathProducts()
self.add_example_jar_classpath_element_for(classpath_product, root)
self.add_excludes_for_targets(classpath_product, provider, root)
classpath = classpath_product.get_for_target(root)
self.assertEqual([('default', self._example_jar_path())], classpath)
def test_jar_provided_exclude_with_similar_org(self):
  """A provided artifact whose org merely prefixes the jar's org+name must not exclude it."""
  provider = self.make_target('provider', ExportableJvmLibrary,
                              provides=Artifact('com.example.lib', '', Repository()))
  root = self.make_target('root', JvmTarget, dependencies=[provider])

  products = ClasspathProducts()
  self.add_example_jar_classpath_element_for(products, root)
  self.add_excludes_for_targets(products, provider, root)

  self.assertEqual([('default', self._example_jar_path())],
                   products.get_for_target(root))
def test_jar_in_classpath_not_a_resolved_jar_ignored_by_excludes(self):
  """Plain path entries (not ResolvedJars) are untouched by exclude processing."""
  dep = self.make_target('b', JvmTarget)
  root = self.make_target('a', JvmTarget, excludes=[Exclude('com.example')], dependencies=[dep])

  jar_path = self._example_jar_path()
  products = ClasspathProducts()
  # Added as a bare path, so the exclude machinery has no coordinates to match.
  products.add_for_target(dep, [('default', jar_path)])
  self.add_excludes_for_targets(products, root)

  self.assertEqual([('default', jar_path)], products.get_for_target(root))
def test_jar_missing_pants_path_fails_adding(self):
  """Adding a ResolvedJar without a pants_path raises a TaskError."""
  dep = self.make_target('b', JvmTarget)
  products = ClasspathProducts()

  bad_jar = ResolvedJar(M2Coordinate(org='org', name='name'),
                        cache_path='somewhere',
                        pants_path=None)
  with self.assertRaises(TaskError) as cm:
    products.add_jars_for_targets([dep], 'default', [bad_jar])

  self.assertEqual(
    'Jar: org:name:::jar has no specified path.',
    str(cm.exception))
def test_get_classpath_entries_for_targets_respect_excludes(self):
  """By default, get_classpath_entries_for_targets honors the target's excludes."""
  target = self.make_target('a', JvmTarget, excludes=[Exclude('com.example', 'lib')])

  products = ClasspathProducts()
  self.add_jar_classpath_element_for_path(products, target, self._example_jar_path())
  self.add_excludes_for_targets(products, target)

  self.assertEqual([], products.get_classpath_entries_for_targets([target]))
def test_get_classpath_entries_for_targets_ignore_excludes(self):
  """With respect_excludes=False the excluded jar is still returned."""
  target = self.make_target('a', JvmTarget, excludes=[Exclude('com.example', 'lib')])

  products = ClasspathProducts()
  jar_path = self._example_jar_path()
  resolved = self.add_jar_classpath_element_for_path(products, target, jar_path,
                                                     conf='fred-conf')
  self.add_excludes_for_targets(products, target)

  entries = products.get_classpath_entries_for_targets([target], respect_excludes=False)
  expected = ArtifactClasspathEntry(jar_path,
                                    resolved.coordinate,
                                    resolved.cache_path)
  self.assertEqual([('fred-conf', expected)], list(entries))
def test_get_classpath_entries_for_targets_transitive(self):
  """Transitive query returns the root's entries followed by its dependency's."""
  dep = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
  root = self.make_target('a', JvmTarget, dependencies=[dep])

  products = ClasspathProducts()
  jar_path = self._example_jar_path()
  resolved = self.add_jar_classpath_element_for_path(products, root, jar_path)
  products.add_for_target(dep, [('default', self.path('b/loose/classes/dir'))])
  products.add_for_target(root, [('default', self.path('a/loose/classes/dir')),
                                 ('default', self.path('an/internally/generated.jar'))])

  entries = products.get_classpath_entries_for_targets([root])
  self.assertEqual([('default', ArtifactClasspathEntry(jar_path,
                                                       resolved.coordinate,
                                                       resolved.cache_path)),
                    ('default', ClasspathEntry(self.path('a/loose/classes/dir'))),
                    ('default', ClasspathEntry(self.path('an/internally/generated.jar'))),
                    ('default', ClasspathEntry(self.path('b/loose/classes/dir')))],
                   entries)
def test_get_classpath_entries_for_targets_intransitive(self):
  """With transitive=False, the dependency's loose-classes entry is omitted."""
  dep = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
  root = self.make_target('a', JvmTarget, dependencies=[dep])

  products = ClasspathProducts()
  jar_path = self._example_jar_path()
  resolved = self.add_jar_classpath_element_for_path(products, root, jar_path)
  products.add_for_target(dep, [('default', self.path('b/loose/classes/dir'))])
  products.add_for_target(root, [('default', self.path('a/loose/classes/dir')),
                                 ('default', self.path('an/internally/generated.jar'))])

  entries = products.get_classpath_entries_for_targets([root], transitive=False)
  self.assertEqual([('default', ArtifactClasspathEntry(jar_path,
                                                       resolved.coordinate,
                                                       resolved.cache_path)),
                    ('default', ClasspathEntry(self.path('a/loose/classes/dir'))),
                    ('default', ClasspathEntry(self.path('an/internally/generated.jar')))],
                   entries)
def test_get_artifact_classpath_entries_for_targets(self):
  """Only artifact-backed entries are returned; loose dirs and internal jars are skipped."""
  dep = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
  root = self.make_target('a', JvmTarget, dependencies=[dep])

  products = ClasspathProducts()
  jar_path = self._example_jar_path()
  resolved = self.add_jar_classpath_element_for_path(products, root, jar_path)

  # These non-artifact classpath entries should be ignored.
  products.add_for_target(dep, [('default', self.path('b/loose/classes/dir'))])
  products.add_for_target(root, [('default', self.path('a/loose/classes/dir')),
                                 ('default', self.path('an/internally/generated.jar'))])

  entries = products.get_artifact_classpath_entries_for_targets([root])
  self.assertEqual([('default', ArtifactClasspathEntry(jar_path,
                                                       resolved.coordinate,
                                                       resolved.cache_path))],
                   entries)
def _example_jar_path(self):
  """Build-root-relative location of the canonical test jar in the fake ivy cache."""
  jar_relpath = 'ivy/jars/com.example/lib/jars/123.4.jar'
  return self.path(jar_relpath)
def path(self, p):
  """Resolve ``p`` against the test's build root."""
  return os.path.join(self.build_root, p)
def add_jar_classpath_element_for_path(self,
                                       classpath_product,
                                       target,
                                       example_jar_path,
                                       conf=None):
  """Register a resolved jar at ``example_jar_path`` for ``target``; return the jar."""
  jar = resolved_example_jar_at(example_jar_path)
  classpath_product.add_jars_for_targets(targets=[target],
                                         conf=conf or 'default',
                                         resolved_jars=[jar])
  return jar
def add_excludes_for_targets(self, classpath_product, *targets):
  """Apply every given target's excludes to the classpath product."""
  classpath_product.add_excludes_for_targets(targets)
def add_example_jar_classpath_element_for(self, classpath_product, target):
  """Register the canonical example jar for ``target``."""
  self.add_jar_classpath_element_for_path(classpath_product,
                                          target,
                                          self._example_jar_path())
| |
# TODO: Use the fact that axis can have units to simplify the process
import functools
from typing import TYPE_CHECKING, Optional
import numpy as np
from pandas._libs.tslibs import Period, to_offset
from pandas._libs.tslibs.frequencies import FreqGroup, base_and_stride, get_freq_code
from pandas._typing import FrameOrSeriesUnion
from pandas.core.dtypes.generic import (
ABCDatetimeIndex,
ABCPeriodIndex,
ABCTimedeltaIndex,
)
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.converter import (
TimeSeries_DateFormatter,
TimeSeries_DateLocator,
TimeSeries_TimedeltaFormatter,
)
from pandas.tseries.frequencies import get_period_alias, is_subperiod, is_superperiod
from pandas.tseries.offsets import DateOffset
if TYPE_CHECKING:
from pandas import Series, Index # noqa:F401
# ---------------------------------------------------------------------
# Plotting functions and monkey patches
def _maybe_resample(series: "Series", ax, kwargs):
    """Reconcile the series' frequency with the frequency already on ``ax``.

    Returns the (possibly new) freq and the (possibly resampled/converted)
    series.  May also trigger a re-plot of existing lines via
    ``_upsample_others`` when the incoming data is finer-grained than the axes.
    """
    # resample against axes freq if necessary
    freq, ax_freq = _get_freq(ax, series)

    if freq is None:  # pragma: no cover
        raise ValueError("Cannot use dynamic axis without frequency info")

    # Convert DatetimeIndex to PeriodIndex
    if isinstance(series.index, ABCDatetimeIndex):
        series = series.to_period(freq=freq)

    if ax_freq is not None and freq != ax_freq:
        if is_superperiod(freq, ax_freq):  # upsample input
            # Re-index the copy at the finer axes frequency ("s" = period start).
            series = series.copy()
            series.index = series.index.asfreq(ax_freq, how="s")  # type: ignore
            freq = ax_freq
        elif _is_sup(freq, ax_freq):  # one is weekly
            # Weekly frequencies don't nest cleanly: go through daily, then
            # resample to the axes frequency using the caller's aggregation.
            how = kwargs.pop("how", "last")
            series = getattr(series.resample("D"), how)().dropna()
            series = getattr(series.resample(ax_freq), how)().dropna()
            freq = ax_freq
        elif is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
            # Incoming data is finer than the axes: re-plot the existing
            # lines at the finer frequency instead of coarsening the input.
            _upsample_others(ax, freq, kwargs)
        else:  # pragma: no cover
            raise ValueError("Incompatible frequency conversion")
    return freq, series
def _is_sub(f1, f2):
return (f1.startswith("W") and is_subperiod("D", f2)) or (
f2.startswith("W") and is_subperiod(f1, "D")
)
def _is_sup(f1, f2):
return (f1.startswith("W") and is_superperiod("D", f2)) or (
f2.startswith("W") and is_superperiod(f1, "D")
)
def _upsample_others(ax, freq, kwargs):
    """Re-plot this axes' (and any twinned axes') stored data at ``freq``,
    then rebuild the legend from the freshly drawn artists.

    Called when new data arrives at a finer frequency than what is already
    plotted; everything previously drawn is redrawn at the finer frequency.
    """
    legend = ax.get_legend()
    lines, labels = _replot_ax(ax, freq, kwargs)
    # NOTE(review): a second, redundant `_replot_ax(ax, freq, kwargs)` call was
    # removed here: it cleared the axes again, orphaning the artists already
    # collected in `lines` (the legend then referenced removed artists).

    # A secondary y-axis keeps its data on the twinned left/right axes.
    other_ax = None
    if hasattr(ax, "left_ax"):
        other_ax = ax.left_ax
    if hasattr(ax, "right_ax"):
        other_ax = ax.right_ax

    if other_ax is not None:
        rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
        lines.extend(rlines)
        labels.extend(rlabels)

    # Only rebuild the legend if one existed and the caller hasn't disabled it.
    if legend is not None and kwargs.get("legend", True) and len(lines) > 0:
        title = legend.get_title().get_text()
        if title == "None":
            title = None
        ax.legend(lines, labels, loc="best", title=title)
def _replot_ax(ax, freq, kwargs):
    """Clear ``ax`` and re-plot every series stashed on ``ax._plot_data`` at
    frequency ``freq``.  Returns (lines, labels) for rebuilding the legend."""
    data = getattr(ax, "_plot_data", None)

    # clear current axes and data
    ax._plot_data = []
    ax.clear()

    _decorate_axes(ax, freq, kwargs)

    lines = []
    labels = []
    if data is not None:
        for series, plotf, kwds in data:
            # Convert a copy to the new frequency ("S" = period start) so the
            # caller's original series is untouched.
            series = series.copy()
            idx = series.index.asfreq(freq, how="S")
            series.index = idx
            # Re-register so a subsequent re-plot sees the converted series.
            ax._plot_data.append((series, plotf, kwds))

            # for tsplot
            if isinstance(plotf, str):
                # plotf may be stored as a plot-kind name; resolve lazily to
                # avoid a circular import at module load time.
                from pandas.plotting._matplotlib import PLOT_CLASSES

                plotf = PLOT_CLASSES[plotf]._plot

            lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0])
            labels.append(pprint_thing(series.name))

    return lines, labels
def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, "_plot_data"):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, "legendlabels"):
ax.legendlabels = [kwargs.get("label", None)]
else:
ax.legendlabels.append(kwargs.get("label", None))
ax.view_interval = None
ax.date_axis_info = None
def _get_ax_freq(ax):
"""
Get the freq attribute of the ax object if set.
Also checks shared axes (eg when using secondary yaxis, sharex=True
or twinx)
"""
ax_freq = getattr(ax, "freq", None)
if ax_freq is None:
# check for left/right ax in case of secondary yaxis
if hasattr(ax, "left_ax"):
ax_freq = getattr(ax.left_ax, "freq", None)
elif hasattr(ax, "right_ax"):
ax_freq = getattr(ax.right_ax, "freq", None)
if ax_freq is None:
# check if a shared ax (sharex/twinx) has already freq set
shared_axes = ax.get_shared_x_axes().get_siblings(ax)
if len(shared_axes) > 1:
for shared_ax in shared_axes:
ax_freq = getattr(shared_ax, "freq", None)
if ax_freq is not None:
break
return ax_freq
def _get_period_alias(freq) -> Optional[str]:
    """Map an offset or frequency string to its period alias (e.g. "D"), or None."""
    if isinstance(freq, DateOffset):
        rule = freq.rule_code
    else:
        # Strip any stride multiplier from the string form, keeping the base.
        rule = base_and_stride(freq)[0]

    return get_period_alias(rule)
def _get_freq(ax, series: "Series"):
    """Return (period alias of the series' frequency, the axes' stored freq)."""
    # Prefer an explicit index freq; otherwise fall back to the inferred one.
    freq = getattr(series.index, "freq", None)
    if freq is None:
        freq = getattr(series.index, "inferred_freq", None)
        freq = to_offset(freq)

    ax_freq = _get_ax_freq(ax)

    # Use the axes' freq when the data itself carries none.
    if freq is None:
        freq = ax_freq

    return _get_period_alias(freq), ax_freq
def _use_dynamic_x(ax, data: "FrameOrSeriesUnion") -> bool:
    """Decide whether the x-axis can use period-based (dynamic) tick locating."""
    freq = _get_index_freq(data.index)
    ax_freq = _get_ax_freq(ax)

    if freq is None:  # convert irregular if axes has freq info
        freq = ax_freq
    else:  # do not use tsplot if irregular was plotted first
        if (ax_freq is None) and (len(ax.get_lines()) > 0):
            return False

    if freq is None:
        return False

    freq = _get_period_alias(freq)

    if freq is None:
        return False

    # FIXME: hack this for 0.10.1, creating more technical debt...sigh
    if isinstance(data.index, ABCDatetimeIndex):
        base = get_freq_code(freq)[0]
        x = data.index
        if base <= FreqGroup.FR_DAY:
            # Daily or coarser: only use dynamic mode when timestamps are
            # midnight-aligned, else period conversion would shift the data.
            return x[:1].is_normalized
        # Finer than daily: the first timestamp must sit exactly on a period
        # boundary for the period representation to be faithful.
        return Period(x[0], freq).to_timestamp().tz_localize(x.tz) == x[0]
    return True
def _get_index_freq(index: "Index") -> Optional[DateOffset]:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq == "B":
weekdays = np.unique(index.dayofweek) # type: ignore
if (5 in weekdays) or (6 in weekdays):
freq = None
freq = to_offset(freq)
return freq
def _maybe_convert_index(ax, data):
    """Convert a datetime-indexed object to a period index for ts plotting.

    Returns ``data`` unchanged for non-datetime/period indexes; raises
    ValueError when no frequency can be determined at all.
    """
    # tsplot converts automatically, but don't want to convert index
    # over and over for DataFrames
    if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)):
        freq = data.index.freq

        if freq is None:
            # We only get here for DatetimeIndex
            freq = data.index.inferred_freq
            freq = to_offset(freq)

        if freq is None:
            # Nothing on the index itself: fall back to the axes' stored freq.
            freq = _get_ax_freq(ax)

        if freq is None:
            raise ValueError("Could not get frequency alias for plotting")

        freq = _get_period_alias(freq)

        if isinstance(data.index, ABCDatetimeIndex):
            # Drop the timezone first; Periods are tz-naive.
            data = data.tz_localize(None).to_period(freq=freq)
        elif isinstance(data.index, ABCPeriodIndex):
            data.index = data.index.asfreq(freq=freq)
    return data
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def _format_coord(freq, t, y):
time_period = Period(ordinal=int(t), freq=freq)
return f"t = {time_period} y = {y:8f}"
def format_dateaxis(subplot, freq, index):
    """
    Pretty-formats the date axis (x-axis).

    Major and minor ticks are automatically set for the frequency of the
    current underlying series.  As the dynamic mode is activated by
    default, changing the limits of the x axis will intelligently change
    the positions of the ticks.

    Raises TypeError for index types other than PeriodIndex/TimedeltaIndex.
    """
    from matplotlib import pylab

    # handle index specific formatting
    # Note: DatetimeIndex does not use this
    # interface. DatetimeIndex uses matplotlib.date directly
    if isinstance(index, ABCPeriodIndex):
        # Locators/formatters for both major and minor ticks, all driven by
        # the series' frequency in dynamic mode.
        majlocator = TimeSeries_DateLocator(
            freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot
        )
        minlocator = TimeSeries_DateLocator(
            freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot
        )
        subplot.xaxis.set_major_locator(majlocator)
        subplot.xaxis.set_minor_locator(minlocator)

        majformatter = TimeSeries_DateFormatter(
            freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot
        )
        minformatter = TimeSeries_DateFormatter(
            freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot
        )
        subplot.xaxis.set_major_formatter(majformatter)
        subplot.xaxis.set_minor_formatter(minformatter)

        # x and y coord info shown in the interactive status bar.
        subplot.format_coord = functools.partial(_format_coord, freq)

    elif isinstance(index, ABCTimedeltaIndex):
        subplot.xaxis.set_major_formatter(TimeSeries_TimedeltaFormatter())
    else:
        raise TypeError("index type not supported")

    pylab.draw_if_interactive()
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_api
class RevisionResolver(object):
  """Interface for objects that compute a sync revision from build properties."""

  def resolve(self, properties):  # pragma: no cover
    raise NotImplementedError()


class RevisionFallbackChain(RevisionResolver):
  """Specify that a given project's sync revision follows the fallback chain."""

  def __init__(self, default=None):
    self._default = default

  def resolve(self, properties):
    """Resolve the revision via the revision fallback chain.

    If the given revision was set using the revision_fallback_chain() function,
    this function will follow the chain, looking at relevant build properties
    until it finds one set or reaches the end of the chain and returns the
    default. If the given revision was not set using revision_fallback_chain(),
    this function just returns it as-is.
    """
    # First truthy property in the chain wins; otherwise use the default.
    for prop in ('parent_got_revision', 'orig_revision', 'revision'):
      value = properties.get(prop)
      if value:
        return value
    return self._default
def jsonish_to_python(spec, is_top=False):
  """Turn a json spec into a python parsable object.

  This exists because Gclient specs, while resembling json, is actually
  ingested using a python "eval()". Therefore a bit of plumbing is required
  to turn our newly constructed Gclient spec into a gclient-readable spec.

  Args:
    spec: a dict/list/scalar structure to render.
    is_top: when True, treat the top-level dict as a suite of assignments
        ("key = value" lines) instead of a dict literal.
  """
  # basestring exists only on Python 2; fall back to str on Python 3 so this
  # helper works under both interpreters.
  try:
    string_types = basestring
  except NameError:  # pragma: no cover
    string_types = str

  ret = ''
  if is_top:  # We're the 'top' level, so treat this dict as a suite.
    ret = '\n'.join(
      '%s = %s' % (k, jsonish_to_python(spec[k])) for k in sorted(spec)
    )
  else:
    if isinstance(spec, dict):
      ret += '{'
      ret += ', '.join(
        "%s: %s" % (repr(str(k)), jsonish_to_python(spec[k]))
        for k in sorted(spec)
      )
      ret += '}'
    elif isinstance(spec, list):
      ret += '['
      ret += ', '.join(jsonish_to_python(x) for x in spec)
      ret += ']'
    elif isinstance(spec, string_types):
      ret = repr(str(spec))
    else:
      ret = repr(spec)
  return ret
class GclientApi(recipe_api.RecipeApi):
  """Recipe API for driving the gclient tool (config, sync, runhooks, ...)."""

  # Singleton object to indicate to checkout() that we should run a revert if
  # we detect that we're on the tryserver.
  RevertOnTryserver = object()

  def __init__(self, **kwargs):
    super(GclientApi, self).__init__(**kwargs)
    # Tri-state: None means "not yet read from build properties"; see
    # the use_mirror property below.
    self.USE_MIRROR = None
    # Optional label prepended to step names; see the spec_alias property.
    self._spec_alias = None

  def __call__(self, name, cmd, infra_step=True, **kwargs):
    """Wrapper for easy calling of gclient steps."""
    assert isinstance(cmd, (list, tuple))
    prefix = 'gclient '
    if self.spec_alias:
      prefix = ('[spec: %s] ' % self.spec_alias) + prefix

    # Put the bundled depot_tools checkout on PATH for the spawned step.
    with self.m.context(
        env_suffixes={'PATH': [self.repo_resource()]}):
      return self.m.python(prefix + name,
                           self.repo_resource('gclient.py'),
                           cmd,
                           infra_step=infra_step,
                           **kwargs)

  @property
  def use_mirror(self):
    """Indicates if gclient will use mirrors in its configuration."""
    if self.USE_MIRROR is None:
      # Lazily read (and cache) the 'use_mirror' build property; defaults on.
      self.USE_MIRROR = self.m.properties.get('use_mirror', True)
    return self.USE_MIRROR

  @use_mirror.setter
  def use_mirror(self, val):  # pragma: no cover
    self.USE_MIRROR = val

  @property
  def spec_alias(self):
    """Optional name for the current spec for step naming."""
    return self._spec_alias

  @spec_alias.setter
  def spec_alias(self, name):
    self._spec_alias = name

  @spec_alias.deleter
  def spec_alias(self):
    self._spec_alias = None

  def get_config_defaults(self):
    """Default values injected into gclient config blobs."""
    return {
      'USE_MIRROR': self.use_mirror,
      'CACHE_DIR': self.m.infra_paths.default_git_cache_dir,
    }

  @staticmethod
  def config_to_pythonish(cfg):
    """Renders cfg as the python-ish text that `gclient config --spec` eats."""
    return jsonish_to_python(cfg.as_jsonish(), True)

  # TODO(machenbach): Remove this method when the old mapping is deprecated.
  @staticmethod
  def got_revision_reverse_mapping(cfg):
    """Returns the merged got_revision_reverse_mapping.

    Returns (dict): A mapping from property name -> project name. It merges the
    values of the deprecated got_revision_mapping and the new
    got_revision_reverse_mapping.
    """
    rev_map = cfg.got_revision_mapping.as_jsonish()
    reverse_rev_map = cfg.got_revision_reverse_mapping.as_jsonish()
    combined_length = len(rev_map) + len(reverse_rev_map)
    reverse_rev_map.update({v: k for k, v in rev_map.items()})

    # Make sure we never have duplicate values in the old map.
    assert combined_length == len(reverse_rev_map)
    return reverse_rev_map

  def resolve_revision(self, revision):
    """Resolves a revision that may be a RevisionResolver or a plain value."""
    if hasattr(revision, 'resolve'):
      return revision.resolve(self.m.properties)
    return revision

  def sync(self, cfg, extra_sync_flags=None, **kwargs):
    """Runs 'gclient sync' for cfg and publishes got_revision properties.

    Returns the step result; the JSON output of the sync step is parsed to
    set one output property per entry of got_revision_reverse_mapping(cfg).
    """
    revisions = []
    self.set_patch_repo_revision(gclient_config=cfg)
    for i, s in enumerate(cfg.solutions):
      if i == 0 and s.revision is None:
        # The first solution defaults to the property fallback chain.
        s.revision = RevisionFallbackChain()
      if s.revision is not None and s.revision != '':
        fixed_revision = self.resolve_revision(s.revision)
        if fixed_revision:
          revisions.extend(['--revision', '%s@%s' % (s.name, fixed_revision)])
    for name, revision in sorted(cfg.revisions.items()):
      fixed_revision = self.resolve_revision(revision)
      if fixed_revision:
        revisions.extend(['--revision', '%s@%s' % (name, fixed_revision)])

    # NOTE(review): dict.values() + list concatenation only works on
    # Python 2; Python 3 would need list(...) around .values().
    test_data_paths = set(self.got_revision_reverse_mapping(cfg).values() +
                          [s.name for s in cfg.solutions])
    step_test_data = lambda: (
      self.test_api.output_json(test_data_paths))
    try:
      # clean() isn't used because the gclient sync flags passed in checkout()
      # do much the same thing, and they're more correct than doing a separate
      # 'gclient revert' because it makes sure the other args are correct when
      # a repo was deleted and needs to be re-cloned (notably
      # --with_branch_heads), whereas 'revert' uses default args for clone
      # operations.
      #
      # TODO(mmoss): To be like current official builders, this step could
      # just delete the whole <slave_name>/build/ directory and start each
      # build from scratch. That might be the least bad solution, at least
      # until we have a reliable gclient method to produce a pristine working
      # dir for git-based builds (e.g. maybe some combination of 'git
      # reset/clean -fx' and removing the 'out' directory).
      j = '-j2' if self.m.platform.is_win else '-j8'
      args = ['sync', '--verbose', '--nohooks', j, '--reset', '--force',
              '--upstream', '--no-nag-max', '--with_branch_heads',
              '--with_tags']
      args.extend(extra_sync_flags or [])
      if cfg.delete_unversioned_trees:
        args.append('--delete_unversioned_trees')
      self('sync', args + revisions +
           ['--output-json', self.m.json.output()],
           step_test_data=step_test_data,
           **kwargs)
    finally:
      # Publish got_revision-style properties even if the sync step failed.
      result = self.m.step.active_result
      if result.json.output is not None:
        solutions = result.json.output['solutions']
        for propname, path in sorted(
            self.got_revision_reverse_mapping(cfg).items()):
          # gclient json paths always end with a slash
          info = solutions.get(path + '/') or solutions.get(path)
          if info:
            result.presentation.properties[propname] = info['revision']

    return result

  def inject_parent_got_revision(self, gclient_config=None, override=False):
    """Match gclient config to build revisions obtained from build_properties.

    Args:
      gclient_config (gclient config object) - The config to manipulate. A value
        of None manipulates the module's built-in config (self.c).
      override (bool) - If True, will forcibly set revision and custom_vars
        even if the config already contains values for them.
    """
    cfg = gclient_config or self.c

    for prop, custom_var in cfg.parent_got_revision_mapping.items():
      val = str(self.m.properties.get(prop, ''))
      # TODO(infra): Fix coverage.
      if val:  # pragma: no cover
        # Special case for 'src', inject into solutions[0]
        if custom_var is None:
          # This is not covered because we are deprecating this feature and
          # it is no longer used by the public recipes.
          if cfg.solutions[0].revision is None or override:  # pragma: no cover
            cfg.solutions[0].revision = val
        else:
          if custom_var not in cfg.solutions[0].custom_vars or override:
            cfg.solutions[0].custom_vars[custom_var] = val

  def checkout(self, gclient_config=None, revert=RevertOnTryserver,
               inject_parent_got_revision=True, extra_sync_flags=None,
               **kwargs):
    """Return a step generator function for gclient checkouts."""
    cfg = gclient_config or self.c
    assert cfg.complete()

    if revert is self.RevertOnTryserver:
      revert = self.m.tryserver.is_tryserver

    if inject_parent_got_revision:
      self.inject_parent_got_revision(cfg, override=True)

    # Write the spec first so the subsequent sync uses it.
    self('setup', ['config', '--spec', self.config_to_pythonish(cfg)], **kwargs)

    sync_step = None
    try:
      sync_step = self.sync(cfg, extra_sync_flags=extra_sync_flags, **kwargs)

      # Configure a throwaway git identity in every checked-out repo so
      # later recipe steps can commit locally.
      cfg_cmds = [
        ('user.name', 'local_bot'),
        ('user.email', 'local_bot@example.com'),
      ]
      for var, val in cfg_cmds:
        name = 'recurse (git config %s)' % var
        self(name, ['recurse', 'git', 'config', var, val], **kwargs)
    finally:
      # Always publish the 'checkout' path, even when sync failed.
      cwd = self.m.context.cwd or self.m.path['start_dir']
      if 'checkout' not in self.m.path:
        self.m.path['checkout'] = cwd.join(
          *cfg.solutions[0].name.split(self.m.path.sep))

    return sync_step

  def runhooks(self, args=None, name='runhooks', **kwargs):
    """Runs 'gclient runhooks' inside the checkout directory."""
    args = args or []
    assert isinstance(args, (list, tuple))
    with self.m.context(cwd=(self.m.context.cwd or self.m.path['checkout'])):
      return self(name, ['runhooks'] + list(args), infra_step=False, **kwargs)

  def break_locks(self):
    """Remove all index.lock files. If a previous run of git crashed, bot was
    reset, etc... we might end up with leftover index.lock files.
    """
    self.m.python.inline(
      'cleanup index.lock',
      """
      import os, sys

      build_path = sys.argv[1]
      if os.path.exists(build_path):
        for (path, dir, files) in os.walk(build_path):
          for cur_file in files:
            if cur_file.endswith('index.lock'):
              path_to_file = os.path.join(path, cur_file)
              print('deleting %s' % path_to_file)
              os.remove(path_to_file)
      """,
      args=[self.m.path['start_dir']],
      infra_step=True,
    )

  def get_gerrit_patch_root(self, gclient_config=None):
    """Returns local path to the repo where gerrit patch will be applied.

    If there is no patch, returns None.
    If patch is specified, but such repo is not found among configured solutions
    or repo_path_map, returns name of the first solution. This is done solely
    for backward compatibility with existing tests.
    Please do not rely on this logic in new code.
    Instead, properly map a repository to a local path using repo_path_map.
    TODO(nodir): remove this. Update all recipe tests to specify a git_repo
    matching the recipe.
    """
    cfg = gclient_config or self.c
    repo_url = self.m.tryserver.gerrit_change_repo_url
    if not repo_url:
      return None
    root = self.get_repo_path(repo_url, gclient_config=cfg)
    # This is wrong, but that's what a ton of recipe tests expect today
    root = root or cfg.solutions[0].name
    return root

  def _canonicalize_repo_url(self, repo_url):
    """Attempts to make repo_url canonical. Supports Gitiles URL."""
    return self.m.gitiles.canonicalize_repo_url(repo_url)

  def get_repo_path(self, repo_url, gclient_config=None):
    """Returns local path to the repo checkout given its url.

    Consults cfg.repo_path_map and fallbacks to urls in configured solutions.
    Returns None if not found.
    """
    rel_path = self._get_repo_path(repo_url, gclient_config=gclient_config)
    if rel_path:
      # Convert the posix-style relative path into a platform path object.
      return self.m.path.join(*rel_path.split('/'))
    return None

  def _get_repo_path(self, repo_url, gclient_config=None):
    # Lookup order: exact repo_path_map key, then canonicalized keys, then
    # the configured solutions' urls.
    repo_url = self._canonicalize_repo_url(repo_url)
    cfg = gclient_config or self.c
    rel_path, _ = cfg.repo_path_map.get(repo_url, ('', ''))
    if rel_path:
      return rel_path

    # repo_path_map keys may be non-canonical.
    for key, (rel_path, _) in cfg.repo_path_map.items():
      if self._canonicalize_repo_url(key) == repo_url:
        return rel_path

    for s in cfg.solutions:
      if self._canonicalize_repo_url(s.url) == repo_url:
        return s.name
    return None

  def set_patch_repo_revision(self, gclient_config=None):
    """Updates config revision corresponding to patched project.

    Useful for bot_update only, as this is the only consumer of gclient's config
    revision map. This doesn't overwrite the revision if it was already set.
    """
    cfg = gclient_config or self.c
    repo_url = self.m.tryserver.gerrit_change_repo_url
    path, revision = cfg.repo_path_map.get(repo_url, (None, None))
    if path and revision and path not in cfg.revisions:
      cfg.revisions[path] = revision
| |
from wlauto import Module
from wlauto.exceptions import ConfigError, DeviceError
# A dict mapping governor name -> list of its tunables that can't be read
# (reading them from sysfs fails, so they are skipped when dumping tunables).
WRITE_ONLY_TUNABLES = {
    'interactive': ['boostpulse']
}
class CpufreqModule(Module):

    # Module identity used by the wlauto plugin loader.
    name = 'devcpufreq'
    description = """
    cpufreq-related functionality module for the device. Query and set frequencies, governors, etc.

    APIs in this module break down into three categories: those that operate on cpus, those that
    operate on cores, and those that operate on clusters.

    "cpu" APIs expect a cpufreq CPU id, which could be either an integer or or a string of the
    form "cpu0".

    "cluster" APIs expect a cluster ID. This is an integer as defined by the
    ``device.core_clusters`` list.

    "core" APIs expect a core name, as defined by ``device.core_names`` list.
    """
    # Advertised so workloads can require a device with cpufreq support.
    capabilities = ['cpufreq']
def probe(self, device):  # pylint: disable=no-self-use
    """A device supports this module iff the cpufreq sysfs node exists."""
    return device.file_exists('/sys/devices/system/cpu/cpufreq')
def initialize(self, context):
    # pylint: disable=W0201
    # NOTE(review): these are deliberately *class* attributes, so the governor
    # caches and the device handle are shared by all instances of this module.
    CpufreqModule._available_governors = {}
    CpufreqModule._available_governor_tunables = {}
    CpufreqModule.device = self.root_owner
def list_available_cpu_governors(self, cpu):
    """Returns a list of governors supported by the cpu."""
    if isinstance(cpu, int):
        cpu = 'cpu{}'.format(cpu)
    if cpu not in self._available_governors:
        # Cache the sysfs read per cpu; governors don't change at runtime.
        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_available_governors'.format(cpu)
        raw = self.device.get_sysfile_value(sysfile)
        self._available_governors[cpu] = raw.strip().split()  # pylint: disable=E1103
    return self._available_governors[cpu]
def get_cpu_governor(self, cpu):
    """Returns the governor currently set for the specified CPU."""
    if isinstance(cpu, int):
        cpu = 'cpu{}'.format(cpu)
    governor_file = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
    return self.device.get_sysfile_value(governor_file)
def set_cpu_governor(self, cpu, governor, **kwargs):
    """
    Set the governor for the specified CPU.
    See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt

    :param cpu: The CPU for which the governor is to be set. Either an int or
                the full sysfs name, e.g. "cpu0".
    :param governor: The name of the governor to be used. This must be
                     supported by the specific device.

    Additional keyword arguments are passed through as governor tunables for
    governors that support them.

    :note: On big.LITTLE all cores in a cluster must be using the same governor.
           Setting the governor on any core in a cluster will also set it on all
           other cores in that cluster.

    :raises: ConfigError if governor is not supported by the CPU.
    :raises: DeviceError if, for some reason, the governor could not be set.
    """
    if isinstance(cpu, int):
        cpu = 'cpu{}'.format(cpu)
    if governor not in self.list_available_cpu_governors(cpu):
        raise ConfigError('Governor {} not supported for cpu {}'.format(governor, cpu))
    governor_file = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
    self.device.set_sysfile_value(governor_file, governor)
    self.set_cpu_governor_tunables(cpu, governor, **kwargs)
def list_available_cpu_governor_tunables(self, cpu):
    """Returns a list of tunables available for the governor on the specified CPU."""
    if isinstance(cpu, int):
        cpu = 'cpu{}'.format(cpu)
    governor = self.get_cpu_governor(cpu)
    # Cached per governor (not per cpu): tunables depend on the governor only.
    if governor not in self._available_governor_tunables:
        try:
            # Newer kernels expose tunables under the per-cpu cpufreq dir.
            tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
            self._available_governor_tunables[governor] = self.device.listdir(tunables_path)
        except DeviceError:  # probably an older kernel
            try:
                # Older kernels use a single global directory per governor.
                tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
                self._available_governor_tunables[governor] = self.device.listdir(tunables_path)
            except DeviceError:  # governor does not support tunables
                self._available_governor_tunables[governor] = []
    return self._available_governor_tunables[governor]
def get_cpu_governor_tunables(self, cpu):
    """Returns a dict of tunable name -> current value for the cpu's governor,
    skipping tunables known to be write-only."""
    if isinstance(cpu, int):
        cpu = 'cpu{}'.format(cpu)
    governor = self.get_cpu_governor(cpu)
    unreadable = WRITE_ONLY_TUNABLES.get(governor, [])

    tunables = {}
    for tunable in self.list_available_cpu_governor_tunables(cpu):
        if tunable in unreadable:
            continue
        try:
            # Per-cpu location on newer kernels.
            path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
            tunables[tunable] = self.device.get_sysfile_value(path)
        except DeviceError:  # May be an older kernel
            path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
            tunables[tunable] = self.device.get_sysfile_value(path)
    return tunables
def set_cpu_governor_tunables(self, cpu, governor, **kwargs):
    """
    Set tunables for the specified governor. Tunables should be specified as
    keyword arguments. Which tunables and values are valid depends on the
    governor.

    :param cpu: The cpu for which the governor will be set. Either an int or
                the full cpu name as it appears in sysfs, e.g. ``cpu0``.
    :param governor: The name of the governor. Must be all lower case.

    The rest should be keyword parameters mapping tunable name onto the value to
    be set for it.

    :raises: ConfigError if governor specified is not a valid governor name, or if
             a tunable specified is not valid for the governor.
    :raises: DeviceError if could not set tunable.
    """
    if isinstance(cpu, int):
        cpu = 'cpu{}'.format(cpu)
    valid_tunables = self.list_available_cpu_governor_tunables(cpu)
    # .items() instead of the Python-2-only .iteritems(): works on both 2 and 3.
    for tunable, value in kwargs.items():
        if tunable in valid_tunables:
            try:
                # Per-cpu location on newer kernels.
                path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
                self.device.set_sysfile_value(path, value)
            except DeviceError:  # May be an older kernel
                path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
                self.device.set_sysfile_value(path, value)
        else:
            message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
            message += 'Available tunables are: {}'.format(valid_tunables)
            raise ConfigError(message)
def list_available_core_frequencies(self, core):
cpu = self.get_core_online_cpu(core)
return self.list_available_cpu_frequencies(cpu)
def list_available_cpu_frequencies(self, cpu):
"""Returns a list of frequencies supported by the cpu or an empty list
if not could be found."""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
try:
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
output = self.device.execute(cmd)
available_frequencies = map(int, output.strip().split()) # pylint: disable=E1103
except DeviceError:
# On some devices scaling_available_frequencies is not generated.
# http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
# Fall back to parsing stats/time_in_state
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
out_iter = iter(self.device.execute(cmd).strip().split())
available_frequencies = map(int, reversed([f for f, _ in zip(out_iter, out_iter)]))
return available_frequencies
def get_cpu_min_frequency(self, cpu):
"""
Returns the min frequency currently set for the specified CPU.
Warning, this method does not check if the cpu is online or not. It will
try to read the minimum frequency and the following exception will be
raised ::
:raises: DeviceError if for some reason the frequency could not be read.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
return self.device.get_sysfile_value(sysfile)
def set_cpu_min_frequency(self, cpu, frequency):
"""
Set's the minimum value for CPU frequency. Actual frequency will
depend on the Governor used and may vary during execution. The value should be
either an int or a string representing an integer. The Value must also be
supported by the device. The available frequencies can be obtained by calling
get_available_frequencies() or examining
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
on the device.
:raises: ConfigError if the frequency is not supported by the CPU.
:raises: DeviceError if, for some reason, frequency could not be set.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
available_frequencies = self.list_available_cpu_frequencies(cpu)
try:
value = int(frequency)
if available_frequencies and value not in available_frequencies:
raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
self.device.set_sysfile_value(sysfile, value)
except ValueError:
raise ValueError('value must be an integer; got: "{}"'.format(value))
def get_cpu_frequency(self, cpu):
"""
Returns the current frequency currently set for the specified CPU.
Warning, this method does not check if the cpu is online or not. It will
try to read the current frequency and the following exception will be
raised ::
:raises: DeviceError if for some reason the frequency could not be read.
"""
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_cur_freq'.format(cpu)
return self.device.get_sysfile_value(sysfile)
def set_cpu_frequency(self, cpu, frequency, exact=True):
"""
Set's the minimum value for CPU frequency. Actual frequency will
depend on the Governor used and may vary during execution. The value should be
either an int or a string representing an integer.
If ``exact`` flag is set (the default), the Value must also be supported by
the device. The available frequencies can be obtained by calling
get_available_frequencies() or examining
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
on the device (if it exists).
:raises: ConfigError if the frequency is not supported by the CPU.
:raises: DeviceError if, for some reason, frequency could not be set.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
try:
value = int(frequency)
if exact:
available_frequencies = self.list_available_cpu_frequencies(cpu)
if available_frequencies and value not in available_frequencies:
raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
if self.get_cpu_governor(cpu) != 'userspace':
raise ConfigError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_setspeed'.format(cpu)
self.device.set_sysfile_value(sysfile, value, verify=False)
except ValueError:
raise ValueError('frequency must be an integer; got: "{}"'.format(value))
def get_cpu_max_frequency(self, cpu):
"""
Returns the max frequency currently set for the specified CPU.
Warning, this method does not check if the cpu is online or not. It will
try to read the maximum frequency and the following exception will be
raised ::
:raises: DeviceError if for some reason the frequency could not be read.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
return self.device.get_sysfile_value(sysfile)
def set_cpu_max_frequency(self, cpu, frequency):
"""
Set's the minimum value for CPU frequency. Actual frequency will
depend on the Governor used and may vary during execution. The value should be
either an int or a string representing an integer. The Value must also be
supported by the device. The available frequencies can be obtained by calling
get_available_frequencies() or examining
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
on the device.
:raises: ConfigError if the frequency is not supported by the CPU.
:raises: DeviceError if, for some reason, frequency could not be set.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
available_frequencies = self.list_available_cpu_frequencies(cpu)
try:
value = int(frequency)
if available_frequencies and value not in available_frequencies:
raise DeviceError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
self.device.set_sysfile_value(sysfile, value)
except ValueError:
raise ValueError('value must be an integer; got: "{}"'.format(value))
    # Core- and cluster-level mappings for the cpu-level APIs above. These
    # APIs make the following assumptions, which were true for all devices
    # that existed at the time of writing:
    # 1. A cluster can only contain cores of one type.
    # 2. All cores in a cluster are tied to the same DVFS domain, therefore
    #    changes to cpufreq for a core will affect all other cores on the
    #    same cluster.
def get_core_clusters(self, core, strict=True):
"""Returns the list of clusters that contain the specified core. if ``strict``
is ``True``, raises ValueError if no clusters has been found (returns empty list
if ``strict`` is ``False``)."""
core_indexes = [i for i, c in enumerate(self.device.core_names) if c == core]
clusters = sorted(list(set(self.device.core_clusters[i] for i in core_indexes)))
if strict and not clusters:
raise ValueError('No active clusters for core {}'.format(core))
return clusters
def get_cluster_active_cpu(self, cluster):
"""Returns the first *active* cpu for the cluster. If the entire cluster
has been hotplugged, this will raise a ``ValueError``."""
cpu_indexes = set([i for i, c in enumerate(self.device.core_clusters) if c == cluster])
active_cpus = sorted(list(cpu_indexes.intersection(self.device.online_cpus)))
if not active_cpus:
raise ValueError('All cpus for cluster {} are offline'.format(cluster))
return active_cpus[0]
def list_available_core_governors(self, core):
cpu = self.get_core_online_cpu(core)
return self.list_available_cpu_governors(cpu)
def list_available_cluster_governors(self, cluster):
cpu = self.get_cluster_active_cpu(cluster)
return self.list_available_cpu_governors(cpu)
def get_core_governor(self, core):
cpu = self.get_core_online_cpu(core)
return self.get_cpu_governor(cpu)
def set_core_governor(self, core, governor, **tunables):
for cluster in self.get_core_clusters(core):
self.set_cluster_governor(cluster, governor, **tunables)
def get_cluster_governor(self, cluster):
cpu = self.get_cluster_active_cpu(cluster)
return self.get_cpu_governor(cpu)
def set_cluster_governor(self, cluster, governor, **tunables):
cpu = self.get_cluster_active_cpu(cluster)
return self.set_cpu_governor(cpu, governor, **tunables)
def list_available_cluster_governor_tunables(self, cluster):
cpu = self.get_cluster_active_cpu(cluster)
return self.list_available_cpu_governor_tunables(cpu)
def get_cluster_governor_tunables(self, cluster):
cpu = self.get_cluster_active_cpu(cluster)
return self.get_cpu_governor_tunables(cpu)
def set_cluster_governor_tunables(self, cluster, governor, **tunables):
cpu = self.get_cluster_active_cpu(cluster)
return self.set_cpu_governor_tunables(cpu, governor, **tunables)
def get_cluster_min_frequency(self, cluster):
cpu = self.get_cluster_active_cpu(cluster)
return self.get_cpu_min_frequency(cpu)
def set_cluster_min_frequency(self, cluster, freq):
cpu = self.get_cluster_active_cpu(cluster)
return self.set_cpu_min_frequency(cpu, freq)
def get_cluster_cur_frequency(self, cluster):
cpu = self.get_cluster_active_cpu(cluster)
return self.get_cpu_cur_frequency(cpu)
def set_cluster_cur_frequency(self, cluster, freq):
cpu = self.get_cluster_active_cpu(cluster)
return self.set_cpu_frequency(cpu, freq)
def get_cluster_max_frequency(self, cluster):
cpu = self.get_cluster_active_cpu(cluster)
return self.get_cpu_max_frequency(cpu)
def set_cluster_max_frequency(self, cluster, freq):
cpu = self.get_cluster_active_cpu(cluster)
return self.set_cpu_max_frequency(cpu, freq)
def get_core_online_cpu(self, core):
for cluster in self.get_core_clusters(core):
try:
return self.get_cluster_active_cpu(cluster)
except ValueError:
pass
raise ValueError('No active CPUs found for core {}'.format(core))
def list_available_core_governor_tunables(self, core):
return self.list_available_cpu_governor_tunables(self.get_core_online_cpu(core))
def get_core_governor_tunables(self, core):
return self.get_cpu_governor_tunables(self.get_core_online_cpu(core))
def set_core_governor_tunables(self, core, tunables):
for cluster in self.get_core_clusters(core):
governor = self.get_cluster_governor(cluster)
self.set_cluster_governor_tunables(cluster, governor, **tunables)
def get_core_min_frequency(self, core):
return self.get_cpu_min_frequency(self.get_core_online_cpu(core))
def set_core_min_frequency(self, core, freq):
for cluster in self.get_core_clusters(core):
self.set_cluster_min_frequency(cluster, freq)
def get_core_cur_frequency(self, core):
return self.get_cpu_cur_frequency(self.get_core_online_cpu(core))
def set_core_cur_frequency(self, core, freq):
for cluster in self.get_core_clusters(core):
self.set_cluster_cur_frequency(cluster, freq)
def get_core_max_frequency(self, core):
return self.get_cpu_max_frequency(self.get_core_online_cpu(core))
def set_core_max_frequency(self, core, freq):
for cluster in self.get_core_clusters(core):
self.set_cluster_max_frequency(cluster, freq)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the nullable two-character ``abbr``
    column to the ``courses_language`` table.

    The ``models`` attribute below is the auto-generated snapshot ("frozen
    ORM") of the apps' model state at the time this migration was created;
    it is data used by South, not live model definitions.
    """

    def forwards(self, orm):
        """Apply the migration: add the ``Language.abbr`` column."""
        # Adding field 'Language.abbr'
        db.add_column('courses_language', 'abbr',
                      self.gf('django.db.models.fields.CharField')(max_length=2, null=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the ``Language.abbr`` column."""
        # Deleting field 'Language.abbr'
        db.delete_column('courses_language', 'abbr')

    # Auto-generated ORM freeze -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254'})
        },
        'badges.alignment': {
            'Meta': {'object_name': 'Alignment'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'badges.badge': {
            'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Badge'},
            'alignments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'alignments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Alignment']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'criteria': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'tags'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Tag']"}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'badges.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'courses.announcement': {
            'Meta': {'ordering': "('-datetime',)", 'object_name': 'Announcement'},
            'content': ('tinymce.models.HTMLField', [], {}),
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']", 'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'courses.attachment': {
            'Meta': {'object_name': 'Attachment'},
            'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']"})
        },
        'courses.course': {
            'Meta': {'ordering': "['order']", 'object_name': 'Course'},
            'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'completion_badge': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'course'", 'null': 'True', 'to': "orm['badges.Badge']"}),
            'created_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'courses_created_of'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['courses.Course']"}),
            'description': ('tinymce.models.HTMLField', [], {}),
            'ects': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'enrollment_method': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '200'}),
            'estimated_effort': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'hashtag': ('django.db.models.fields.CharField', [], {'default': "'Hashtag'", 'max_length': '128'}),
            'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'intended_audience': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'is_activity_clonable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['courses.Language']", 'symmetrical': 'False'}),
            'learning_goals': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'max_mass_emails_month': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'courses_as_owner'", 'to': "orm['auth.User']"}),
            'promotion_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'promotion_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'requirements': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'static_page': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['courses.StaticPage']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
            'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'courses_as_student'", 'blank': 'True', 'through': "orm['courses.CourseStudent']", 'to': "orm['auth.User']"}),
            'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses_as_teacher'", 'symmetrical': 'False', 'through': "orm['courses.CourseTeacher']", 'to': "orm['auth.User']"}),
            'threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'thumbnail_alt': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'user_score': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'courses.coursestudent': {
            'Meta': {'object_name': 'CourseStudent'},
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'old_course_status': ('django.db.models.fields.CharField', [], {'default': "'f'", 'max_length': '1'}),
            'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'courses.courseteacher': {
            'Meta': {'ordering': "['order']", 'object_name': 'CourseTeacher'},
            'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'courses.knowledgequantum': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('title', 'unit'),)", 'object_name': 'KnowledgeQuantum'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'supplementary_material': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'teacher_comments': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'unit': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Unit']"}),
            'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        },
        'courses.language': {
            'Meta': {'object_name': 'Language'},
            'abbr': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'courses.option': {
            'Meta': {'unique_together': "(('question', 'x', 'y'),)", 'object_name': 'Option'},
            'feedback': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '12'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'optiontype': ('django.db.models.fields.CharField', [], {'default': "'t'", 'max_length': '1'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Question']"}),
            'solution': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '100'}),
            'x': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'y': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
        },
        'courses.question': {
            'Meta': {'object_name': 'Question'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']", 'unique': 'True'}),
            'last_frame': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'solution_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'solution_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            'solution_text': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'use_last_frame': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'courses.staticpage': {
            'Meta': {'object_name': 'StaticPage'},
            'body': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        'courses.unit': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('title', 'course'),)", 'object_name': 'Unit'},
            'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
            'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'unittype': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        }
    }

    # Apps whose frozen state above is complete for this migration.
    complete_apps = ['courses']
| |
"""Module for constructing RNN Cells."""
import math
import tensorflow as tf
from tensorflow.models.rnn import linear
class RNNCell(object):
  """Abstract object representing an RNN cell.

  An RNN cell, in the most abstract setting, is anything that has
  a state -- a vector of floats of size self.state_size -- and performs some
  operation that takes inputs of size self.input_size. This operation
  results in an output of size self.output_size and a new state.

  This module provides a number of basic commonly used RNN cells, such as
  LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
  of operators that allow adding dropouts, projections, or embeddings for
  inputs. Constructing multi-layer cells is supported by a super-class,
  MultiRNNCell, defined later. Every RNNCell must have the properties below
  and implement __call__ with the following signature.
  """
  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: 2D Tensor with shape [batch_size x self.input_size].
      state: 2D Tensor with shape [batch_size x self.state_size].
      scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
      A pair containing:
      - Output: A 2D Tensor with shape [batch_size x self.output_size]
      - New state: A 2D Tensor with shape [batch_size x self.state_size].
    """
    raise NotImplementedError("Abstract method")
  @property
  def input_size(self):
    """Integer: size of inputs accepted by this cell."""
    raise NotImplementedError("Abstract method")
  @property
  def output_size(self):
    """Integer: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")
  @property
  def state_size(self):
    """Integer: size of state used by this cell."""
    raise NotImplementedError("Abstract method")
  def zero_state(self, batch_size, dtype):
    """Return state tensor (shape [batch_size x state_size]) filled with 0.

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      A 2D Tensor of shape [batch_size x state_size] filled with zeros.
    """
    # NOTE(review): tf.pack is the pre-TF-1.0 API (later renamed tf.stack);
    # this file targets that old TensorFlow version throughout.
    zeros = tf.zeros(tf.pack([batch_size, self.state_size]), dtype=dtype)
    # The reshape below is a no-op, but it allows shape inference of shape[1].
    return tf.reshape(zeros, [-1, self.state_size])
class BasicRNNCell(RNNCell):
  """The most basic RNN cell."""
  def __init__(self, num_units):
    # num_units: size of the hidden state, which is also the input and
    # output size of this cell.
    self._num_units = num_units
  @property
  def input_size(self):
    """Integer: size of inputs accepted by this cell."""
    return self._num_units
  @property
  def output_size(self):
    """Integer: size of outputs produced by this cell."""
    return self._num_units
  @property
  def state_size(self):
    """Integer: size of state used by this cell."""
    return self._num_units
  def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
      output = tf.tanh(linear.linear([inputs, state], self._num_units, True))
    # Output and new state are the same tensor for a vanilla RNN.
    return output, output
class GRUCell(RNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
  def __init__(self, num_units):
    # num_units: size of the hidden state, which is also the input and
    # output size of this cell.
    self._num_units = num_units
  @property
  def input_size(self):
    """Integer: size of inputs accepted by this cell."""
    return self._num_units
  @property
  def output_size(self):
    """Integer: size of outputs produced by this cell."""
    return self._num_units
  @property
  def state_size(self):
    """Integer: size of state used by this cell."""
    return self._num_units
  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with num_units cells."""
    with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with tf.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = tf.split(1, 2, linear.linear([inputs, state],
                                            2 * self._num_units, True, 1.0))
        r, u = tf.sigmoid(r), tf.sigmoid(u)
      with tf.variable_scope("Candidate"):
        # Candidate activation uses the reset-gated state.
        c = tf.tanh(linear.linear([inputs, r * state], self._num_units, True))
      # Interpolate between previous state and candidate via the update gate.
      new_h = u * state + (1 - u) * c
    # GRU output and state are the same tensor.
    return new_h, new_h
class BasicLSTMCell(RNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/pdf/1409.2329v5.pdf.

  It does not allow cell clipping, a projection layer, and does not
  use peep-hole connections: it is the basic baseline.

  Biases of the forget gate are initialized by default to 1 in order to reduce
  the scale of forgetting in the beginning of the training.
  """
  def __init__(self, num_units, forget_bias=1.0):
    # num_units: number of LSTM units.
    # forget_bias: added to the forget gate pre-activation (see __call__).
    self._num_units = num_units
    self._forget_bias = forget_bias
  @property
  def input_size(self):
    """Integer: size of inputs accepted by this cell."""
    return self._num_units
  @property
  def output_size(self):
    """Integer: size of outputs produced by this cell."""
    return self._num_units
  @property
  def state_size(self):
    # State is the concatenation [c, h] along dimension 1 (see __call__).
    return 2 * self._num_units
  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = tf.split(1, 2, state)
      concat = linear.linear([inputs, h], 4 * self._num_units, True)
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = tf.split(1, 4, concat)
      new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
      new_h = tf.tanh(new_c) * tf.sigmoid(o)
    return new_h, tf.concat(1, [new_c, new_h])
class LSTMCell(RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.

  This implementation is based on:
    https://research.google.com/pubs/archive/43905.pdf
  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
  large scale acoustic modeling." INTERSPEECH, 2014.

  It uses peep-hole connections, optional cell clipping, and an optional
  projection layer.
  """
  def __init__(self, num_units, input_size,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None,
               num_unit_shards=1, num_proj_shards=1):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      input_size: int, The dimensionality of the inputs into the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices. If None, no projection is performed.
      num_unit_shards: How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
        Note that num_unit_shards must evenly divide num_units * 4.
      num_proj_shards: How to split the projection matrix. If >1, the
        projection matrix is stored across num_proj_shards.
        Note that num_proj_shards must evenly divide num_proj
        (if num_proj is not None).

    Raises:
      ValueError: if num_unit_shards doesn't divide 4 * num_units or
        num_proj_shards doesn't divide num_proj
    """
    self._num_units = num_units
    self._input_size = input_size
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    # Error messages fixed: "evently" -> "evenly".
    if (num_units * 4) % num_unit_shards != 0:
      raise ValueError("num_unit_shards must evenly divide 4 * num_units")
    if num_proj and num_proj % num_proj_shards != 0:
      raise ValueError("num_proj_shards must evenly divide num_proj")
    if num_proj:
      # With a projection layer the state is [c (num_units), h (num_proj)].
      self._state_size = num_units + num_proj
      self._output_size = num_proj
    else:
      # Without projection the state is [c, h], both of size num_units.
      self._state_size = 2 * num_units
      self._output_size = num_units
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
"""Run one step of LSTM.
Args:
input_: input Tensor, 2D, batch x num_units.
state: state Tensor, 2D, batch x state_size.
scope: VariableScope for the created subgraph; defaults to "LSTMCell".
Returns:
A tuple containing:
- A 2D, batch x output_dim, Tensor representing the output of the LSTM
after reading "input_" when previous state was "state".
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- A 2D, batch x state_size, Tensor representing the new state of LSTM
after reading "input_" when previous state was "state".
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
m_prev = tf.slice(state, [0, self._num_units], [-1, num_proj])
dtype = input_.dtype
unit_shard_size = (4 * self._num_units) / self._num_unit_shards
with tf.variable_scope(scope or type(self).__name__): # "LSTMCell"
w = tf.concat(
1, [tf.get_variable("W_%d" % i,
shape=[self.input_size + num_proj,
unit_shard_size],
initializer=self._initializer,
dtype=dtype)
for i in range(self._num_unit_shards)])
b = tf.get_variable(
"B", shape=[4 * self._num_units],
initializer=tf.zeros_initializer, dtype=dtype)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = tf.concat(1, [input_, m_prev])
i, j, f, o = tf.split(1, 4, tf.nn.bias_add(tf.matmul(cell_inputs, w), b))
# Diagonal connections
if self._use_peepholes:
w_f_diag = tf.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = tf.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = tf.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (tf.sigmoid(f + 1 + w_f_diag * c_prev) * c_prev +
tf.sigmoid(i + w_i_diag * c_prev) * tf.tanh(j))
else:
c = (tf.sigmoid(f + 1) * c_prev + tf.sigmoid(i) * tf.tanh(j))
if self._cell_clip is not None:
c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip)
if self._use_peepholes:
m = tf.sigmoid(o + w_o_diag * c) * tf.tanh(c)
else:
m = tf.sigmoid(o) * tf.tanh(c)
if self._num_proj is not None:
proj_shard_size = self._num_proj / self._num_proj_shards
w_proj = tf.concat(
1, [tf.get_variable("W_P_%d" % i,
shape=[self._num_units, proj_shard_size],
initializer=self._initializer, dtype=dtype)
for i in range(self._num_proj_shards)])
# TODO(ebrevdo), use matmulsum
m = tf.matmul(m, w_proj)
return m, tf.concat(1, [c, m])
class OutputProjectionWrapper(RNNCell):
  """Operator adding an output projection to the given cell.
  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your outputs in time,
  do the projection on this batch-concated sequence, then split it
  if needed or directly feed into a softmax.
  """
  def __init__(self, cell, output_size):
    """Create a cell with output projection.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      output_size: integer, the size of the output after projection.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if output_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if output_size < 1:
      raise ValueError("Parameter output_size must be > 0: %d." % output_size)
    self._cell = cell
    self._output_size = output_size
  @property
  def input_size(self):
    # The wrapper is transparent on the input side.
    return self._cell.input_size
  @property
  def output_size(self):
    return self._output_size
  @property
  def state_size(self):
    return self._cell.state_size
  def __call__(self, inputs, state, scope=None):
    """Run the cell and output projection on inputs, starting from state."""
    cell_output, new_state = self._cell(inputs, state)
    # Default scope: "OutputProjectionWrapper"
    with tf.variable_scope(scope or type(self).__name__):
      projected_output = linear.linear(cell_output, self._output_size, True)
    return projected_output, new_state
class InputProjectionWrapper(RNNCell):
  """Operator adding an input projection to the given cell.
  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the projection on this batch-concated sequence, then split it.
  """
  def __init__(self, cell, input_size):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      input_size: integer, the size of the inputs before projection.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if input_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if input_size < 1:
      raise ValueError("Parameter input_size must be > 0: %d." % input_size)
    self._cell = cell
    self._input_size = input_size
  @property
  def input_size(self):
    # Size before the projection -- what callers feed this wrapper.
    return self._input_size
  @property
  def output_size(self):
    return self._cell.output_size
  @property
  def state_size(self):
    return self._cell.state_size
  def __call__(self, inputs, state, scope=None):
    """Project the inputs, then run the wrapped cell on the projection."""
    # Default scope: "InputProjectionWrapper"
    with tf.variable_scope(scope or type(self).__name__):
      projected_inputs = linear.linear(inputs, self._cell.input_size, True)
    return self._cell(projected_inputs, state)
class DropoutWrapper(RNNCell):
  """Operator adding dropout to inputs and outputs of the given cell."""
  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               seed=None):
    """Create a cell with added input and/or output dropout.
    Dropout is never used on the state.
    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not a RNNCell.")
    if (isinstance(input_keep_prob, float) and
        not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
      raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
                       % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
        not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
      # BUG FIX: this message previously named input_keep_prob.
      raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
                       % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed
  @property
  def input_size(self):
    return self._cell.input_size
  @property
  def output_size(self):
    return self._cell.output_size
  @property
  def state_size(self):
    return self._cell.state_size
  def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts.

    `scope` is accepted for interface consistency with the other cells in
    this module; no variables are created here, so it is unused.
    """
    # A non-float keep_prob is a Tensor, so dropout must always be applied;
    # a float < 1 also needs dropout. Only a float exactly 1 skips it.
    if (not isinstance(self._input_keep_prob, float) or
        self._input_keep_prob < 1):
      inputs = tf.nn.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state)
    if (not isinstance(self._output_keep_prob, float) or
        self._output_keep_prob < 1):
      output = tf.nn.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state
class EmbeddingWrapper(RNNCell):
  """Operator adding input embedding to the given cell.
  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the embedding on this batch-concated sequence, then split it and
  feed into your RNN.
  """
  def __init__(self, cell, embedding_classes=0, embedding=None,
               initializer=None):
    """Create a cell with an added input embedding.
    Args:
      cell: an RNNCell, an embedding will be put before its inputs.
      embedding_classes: integer, how many symbols will be embedded.
      embedding: Variable, the embedding to use; if None, a new embedding
        will be created; if set, then embedding_classes is not required.
      initializer: an initializer to use when creating the embedding;
        if None, the initializer from variable scope or a default one is used.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if embedding_classes is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if embedding_classes < 1 and embedding is None:
      raise ValueError("Pass embedding or embedding_classes must be > 0: %d."
                       % embedding_classes)
    # NOTE(review): `embedding.size[0]` / `embedding.size[1]` look suspect --
    # a tf.Variable in this API exposes get_shape(), not an indexable `.size`
    # attribute; confirm against the `linear` / tf version in use.
    if embedding_classes > 0 and embedding is not None:
      if embedding.size[0] != embedding_classes:
        raise ValueError("You declared embedding_classes=%d but passed an "
                         "embedding for %d classes." % (embedding.size[0],
                                                        embedding_classes))
      if embedding.size[1] != cell.input_size:
        raise ValueError("You passed embedding with output size %d and a cell"
                         " that accepts size %d." % (embedding.size[1],
                                                     cell.input_size))
    self._cell = cell
    self._embedding_classes = embedding_classes
    self._embedding = embedding
    self._initializer = initializer
  @property
  def input_size(self):
    # Inputs are symbol ids, one per step.
    return 1
  @property
  def output_size(self):
    return self._cell.output_size
  @property
  def state_size(self):
    return self._cell.state_size
  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with tf.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      # Embedding lookups are pinned to CPU.
      with tf.device("/cpu:0"):
        # NOTE(review): truthiness of `self._embedding` assumes a plain
        # Python object; a Tensor-valued embedding would make this `if`
        # ambiguous -- confirm the expected type.
        if self._embedding:
          embedding = self._embedding
        else:
          if self._initializer:
            initializer = self._initializer
          elif tf.get_variable_scope().initializer:
            initializer = tf.get_variable_scope().initializer
          else:
            # Default initializer for embeddings should have variance=1.
            sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
            initializer = tf.random_uniform_initializer(-sqrt3, sqrt3)
          embedding = tf.get_variable("embedding", [self._embedding_classes,
                                                    self._cell.input_size],
                                      initializer=initializer)
        # Flatten ids to 1D for the lookup.
        embedded = tf.nn.embedding_lookup(embedding, tf.reshape(inputs, [-1]))
    return self._cell(embedded, state)
class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells."""
  def __init__(self, cells):
    """Create a RNN cell composed sequentially of a number of RNNCells.
    Args:
      cells: list of RNNCells that will be composed in this order.
    Raises:
      ValueError: if cells is empty (not allowed) or if their sizes don't match.
    """
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    # Each layer's output feeds the next layer's input, so sizes must chain.
    for i in xrange(len(cells) - 1):
      if cells[i + 1].input_size != cells[i].output_size:
        raise ValueError("In MultiRNNCell, the input size of each next"
                         " cell must match the output size of the previous one."
                         " Mismatched output size in cell %d." % i)
    self._cells = cells
  @property
  def input_size(self):
    # The stack consumes whatever its first cell consumes.
    return self._cells[0].input_size
  @property
  def output_size(self):
    # ...and emits whatever its last cell emits.
    return self._cells[-1].output_size
  @property
  def state_size(self):
    # Per-layer states are packed side by side along dimension 1.
    return sum([cell.state_size for cell in self._cells])
  def __call__(self, inputs, state, scope=None):
    """Run this multi-layer cell on inputs, starting from state."""
    with tf.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
      cur_state_pos = 0
      cur_inp = inputs
      new_states = []
      for i, cell in enumerate(self._cells):
        # Each layer gets its own variable scope so weights don't collide.
        with tf.variable_scope("Cell%d" % i):
          # Carve this layer's slice out of the packed state tensor.
          cur_state = tf.slice(state, [0, cur_state_pos], [-1, cell.state_size])
          cur_state_pos += cell.state_size
          # Output of layer i becomes input of layer i+1.
          cur_inp, new_state = cell(cur_inp, cur_state)
          new_states.append(new_state)
    return cur_inp, tf.concat(1, new_states)
| |
# -*- coding: utf-8 -*-
# Standard library imports
import os
import shutil
from collections import defaultdict
from contextlib import contextmanager
# Local imports
from .packages import yaml
from . import utils, lib
# Third party imports
from maya import cmds
class ShadeSet(dict):
    '''A dictionary subclass used to gather and export scene shading data

    Arguments:
        path (str): Path to shadeset yml file.
    '''

    # Registered SubSet instances; every gather/apply/export iterates these.
    registry = set()

    def __init__(self, path=None, *args, **kwargs):
        self.path = path
        self.root = None
        self.name = None
        if self.path:
            self.root = os.path.dirname(self.path)
            self.name = os.path.splitext(os.path.basename(self.path))[0]
        super(ShadeSet, self).__init__(*args, **kwargs)

    def relative(self, path):
        '''Return ``path`` joined onto this shadeset's root directory.'''
        return os.path.join(self.root, path)

    @classmethod
    def load(cls, shade_path):
        '''Load scene shading data from an exported shadeset'''
        with open(shade_path, 'r') as f:
            # safe_load mirrors the safe_dump used in _export and avoids
            # constructing arbitrary Python objects from the yml file.
            shade_data = yaml.safe_load(f.read())
        return cls(shade_path, shade_data)

    @classmethod
    def gather(cls, selection=True, render_layers=False):
        '''Gather shading data from a scene using all registered
        `Subsets`.

        Arguments:
            selection (bool): Gather shading data for the selected transforms
            render_layers (bool): Also gather per-render-layer data

        Returns:
            ShadeSet object containing the gathered shading data.
        '''
        shade_set = cls()
        if render_layers:
            layers_data = defaultdict(dict)
            with RenderLayers(RenderLayer.names()) as layers:
                for layer in layers:
                    layer.activate()
                    for subset in cls.registry:
                        data = subset.gather(selection=selection)
                        layers_data[layer.name].update(data)
            if layers_data:
                shade_set['render_layers'] = dict(layers_data)
        for subset in cls.registry:
            data = subset.gather(selection=selection)
            shade_set.update(data)
        return shade_set

    @utils.maintains_selection
    def reference(self):
        '''Reference subset dependencies.'''
        for subset in self.registry:
            subset.reference(self)

    @utils.maintains_selection
    def import_(self):
        '''Import subset dependencies.'''
        for subset in self.registry:
            subset.import_(self)

    @utils.maintains_selection
    def apply(self, selection=False, render_layers=False):
        '''Apply this `ShadeSet` to the currently opened scene'''
        for subset in self.registry:
            subset.apply(self, selection=selection)
        if not render_layers:
            return
        # Named layers_data so the `render_layers` bool parameter above is
        # not shadowed by the gathered per-layer data (previous code reused
        # the same name for both).
        layers_data = self.get('render_layers', None)
        if layers_data:
            with RenderLayers(layers_data.keys()) as layers:
                for layer in layers:
                    if not layer.exists:
                        layer.create()
                    layer.activate()
                    for subset in self.registry:
                        subset.apply(
                            layers_data[layer.name],
                            selection=selection
                        )

    @utils.maintains_selection
    def export(self, outdir, name):
        '''Export this `ShadeSet` to a directory

        Arguments:
            outdir (str): Output directory
            name (str): Basename of output files
        '''
        made_dir = False
        if not os.path.exists(outdir):
            os.makedirs(outdir)
            made_dir = True
        try:
            self._export(outdir, name)
        except Exception:
            # Roll back a directory we created, then surface the error.
            if made_dir:
                shutil.rmtree(outdir)
            raise

    def _export(self, outdir, name):
        # Let each subset write its own files, then write the yml manifest.
        for subset in self.registry:
            subset.export(self, outdir, name)
        shade_path = os.path.join(outdir, name + '.yml')
        encoded = yaml.safe_dump(dict(self), default_flow_style=False)
        with open(shade_path, 'w') as f:
            f.write(encoded)
class SubSet(object):
    '''Abstract base class for a unit of shading data.

    Subclasses must override ``gather`` and ``apply``; the import,
    reference, and export hooks are optional no-ops by default.
    '''

    def gather(self, selection):
        '''Collect this subset's data from the scene. Must be overridden.'''
        raise NotImplementedError()

    def import_(self, shade_set):
        '''Optional hook: import file dependencies for this subset.'''
        pass

    def reference(self, shade_set):
        '''Optional hook: reference file dependencies for this subset.'''
        pass

    def export(self, shade_set, outdir, name):
        '''Optional hook: write this subset's files to ``outdir``.'''
        pass

    def apply(self, shade_set, selection=False):
        '''Apply gathered data to the open scene. Must be overridden.'''
        raise NotImplementedError()
class ShadingGroupsSet(SubSet):
    '''Gathers and Applies shader assignments.'''

    def path(self, shade_set):
        '''Path of the Maya binary file holding this shadeset's shaders.'''
        return os.path.join(
            shade_set.root,
            shade_set.name + '_shadingGroups.mb'
        )

    def gather(self, selection):
        '''Collect shading group membership for the chosen transforms.'''
        data = {}
        if selection:
            transforms = cmds.ls(sl=True, long=True)
        else:
            transforms = cmds.ls(long=True, transforms=True)
        shading_groups = []
        for t in transforms:
            sgs = utils.get_shading_groups(t)
            if sgs:
                shading_groups.extend(sgs)
        # Deduplicate: multiple transforms can share a shading group.
        shading_groups = set(shading_groups)
        for sg in shading_groups:
            members = utils.get_members(sg)
            if not members:
                continue
            _id = utils.add_id(sg)
            members = utils.filter_bad_face_assignments(members)
            members = utils.shorten_names(members)
            data[str(sg)] = {
                'meta_id': _id,
                'members': members,
            }
        return {'shadingGroups': data}

    def import_(self, shade_set):
        path = self.path(shade_set)
        utils.import_shader(path)

    def reference(self, shade_set):
        path = self.path(shade_set)
        utils.reference_shader(path, namespace='sg')

    def export(self, shade_set, outdir, name):
        # BUG FIX: dict.keys() returns a view in Python 3 which has no
        # extend(); materialize a real list before mutating it.
        shading_groups = list(shade_set['shadingGroups'].keys())
        if 'render_layers' in shade_set:
            for render_layer, data in shade_set['render_layers'].items():
                shading_groups.extend(data['shadingGroups'].keys())
        shading_groups = list(set(shading_groups))
        path = os.path.join(outdir, name + '_shadingGroups.mb')
        utils.export_shader(shading_groups, path)

    def apply(self, shade_set, selection=False):
        for sg, sg_data in shade_set['shadingGroups'].items():
            if sg == 'initialShadingGroup':
                # The default shading group always exists; never remap it.
                shading_group = 'initialShadingGroup'
            else:
                shading_group = utils.node_from_id(sg_data['meta_id'])
            members = utils.find_members(sg_data['members'])
            if selection:
                nodes = cmds.ls(sl=True, long=True)
                members = [m for m in members
                           if utils.member_in_hierarchy(m, *nodes)]
            utils.assign_shading_group(shading_group, members)
class CustomAttributesSet(SubSet):
    '''Gathers and Applies shape attributes by name and prefix.'''
    def gather(self, selection):
        # Maps short shape name -> {attr name: attr data}.
        data = {}
        if selection:
            shapes = [utils.get_shape(n) for n in cmds.ls(sl=True, long=True)]
        else:
            shapes = [
                utils.get_shape(n)
                for n in cmds.ls(long=True, transforms=True)
            ]
        for shape in shapes:
            short_name = utils.shorten_name(shape)
            shape_data = {}
            # Attributes matching any configured export prefix...
            for prefix in lib.get_export_attr_prefixes():
                attrs = utils.get_prefixed_attrs(shape, prefix)
                for attr in attrs:
                    shape_data[attr] = utils.get_attr_data(shape, attr)
            # ...plus explicitly configured attribute names, when present.
            for attr in lib.get_export_attrs():
                attr_path = shape + '.' + attr
                if cmds.objExists(attr_path):
                    shape_data[attr] = utils.get_attr_data(shape, attr)
            data[short_name] = shape_data
        return {'customAttributes': data}
    def apply(self, shade_set, selection=False):
        if 'customAttributes' not in shade_set:
            return
        for shape, attrs in shade_set['customAttributes'].items():
            members = utils.find_shape(shape)
            if selection:
                nodes = cmds.ls(sl=True, long=True)
                members = [m for m in members
                           if utils.member_in_hierarchy(m, *nodes)]
            for attr_name, attr_data in attrs.items():
                for member in members:
                    # NOTE(review): attr_name is unused here -- presumably
                    # utils.set_attr_data derives the attribute name from
                    # attr_data; confirm, otherwise attr_name should be
                    # passed through.
                    utils.set_attr_data(member, attr_data)
class ObjectSetsSet(SubSet):
    '''Placeholder subset for Maya object sets; gather/apply not yet built.'''

    def gather(self, selection):
        # TODO implement Gather
        return {'ObjectSets': {}}

    def apply(self, shade_set, selection=False):
        # TODO Implement Apply
        pass
# Register the built-in subsets so ShadeSet.gather/apply/export include them.
ShadeSet.registry.add(ShadingGroupsSet())
ShadeSet.registry.add(CustomAttributesSet())
ShadeSet.registry.add(ObjectSetsSet())
class RenderLayer(object):
    '''Wraps a Maya render layer by name; identity and equality are by name.'''

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        # Compare by string so RenderLayer('a') == 'a' == RenderLayer('a').
        return str(self) == str(other)

    @property
    def exists(self):
        '''True when a render layer with this name is in the scene.'''
        return self.name in self.names()

    def create(self):
        '''Create this render layer (empty); it must not already exist.'''
        if self.exists:
            raise Exception('This layer already exists...')
        cmds.createRenderLayer(name=self.name, empty=True)

    def activate(self):
        '''Make this layer the scene's current render layer.'''
        cmds.editRenderLayerGlobals(currentRenderLayer=self.name)

    @property
    def members(self):
        return cmds.editRenderLayerMembers(self.name, q=True, fullNames=True)

    def remove_members(self, *members):
        args = [self.name] + list(members)
        cmds.editRenderLayerMembers(*args, remove=True)

    def add_members(self, *members):
        args = [self.name] + list(members)
        cmds.editRenderLayerMembers(*args, noRecurse=True)

    @classmethod
    def names(cls):
        return cmds.ls(type='renderLayer')

    @classmethod
    def all(cls):
        # BUG FIX: the original returned inside the loop, so it produced only
        # the first layer (or None for an empty scene) instead of all layers.
        return [cls(layer) for layer in cls.names()]

    @classmethod
    def active(cls):
        return cls(cmds.editRenderLayerGlobals(crl=True, q=True))
@contextmanager
def RenderLayers(layers):
    '''Context manager yielding a generator of RenderLayer objects.

    The render layer that was active on entry is re-activated on exit,
    even if the body raises.
    '''
    previous = RenderLayer.active()
    try:
        yield (RenderLayer(name) for name in layers)
    finally:
        previous.activate()
| |
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import contextlib
import io
import os
import platform
import re
import socket
import struct
import warnings
from .__version__ import __version__
from . import certs
# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import to_native_string
from .compat import parse_http_list as _parse_list_header
from .compat import (
quote, urlparse, bytes, str, OrderedDict, unquote, getproxies,
proxy_bypass, urlunparse, basestring, integer_types, is_py3,
proxy_bypass_environment, getproxies_environment)
from .cookies import cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import (
InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
if platform.system() == 'Windows':
    # provide a proxy_bypass version on Windows without DNS lookups
    def proxy_bypass_registry(host):
        """Return True if the Windows registry ProxyOverride patterns say
        *host* should bypass the proxy."""
        if is_py3:
            import winreg
        else:
            import _winreg as winreg
        try:
            internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            proxyEnable = winreg.QueryValueEx(internetSettings,
                                              'ProxyEnable')[0]
            proxyOverride = winreg.QueryValueEx(internetSettings,
                                                'ProxyOverride')[0]
        except OSError:
            # Missing key or value: no usable proxy configuration.
            return False
        if not proxyEnable or not proxyOverride:
            return False
        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(';')
        # now check if we match one of the registry values.
        for test in proxyOverride:
            if test == '<local>':
                # '<local>' means: bypass for plain hostnames (no dots).
                if '.' not in host:
                    return True
            # Translate the glob pattern into a regular expression.
            test = test.replace(".", r"\.")     # mask dots
            test = test.replace("*", r".*")     # change glob sequence
            test = test.replace("?", r".")      # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.
        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update.

    Mapping-like objects are converted to their (key, value) items view;
    anything else is returned unchanged.
    """
    items = getattr(d, 'items', None)
    if items is not None:
        return items()
    return d
def super_len(o):
    """Best-effort remaining byte/char count of *o*.

    Tries, in order: ``len(o)``, an ``o.len`` attribute, ``fstat`` on the
    object's file descriptor, and finally seek-to-end/tell. The current
    read position (when tellable) is subtracted so the result is what is
    left to read, never less than 0.
    """
    total_length = None
    current_position = 0
    if hasattr(o, '__len__'):
        total_length = len(o)
    elif hasattr(o, 'len'):
        total_length = o.len
    elif hasattr(o, 'fileno'):
        try:
            fileno = o.fileno()
        except io.UnsupportedOperation:
            # File-like object without a real descriptor (e.g. BytesIO);
            # fall through to the seek/tell strategy below.
            pass
        else:
            total_length = os.fstat(fileno).st_size
            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )
    if hasattr(o, 'tell'):
        try:
            current_position = o.tell()
        except (OSError, IOError):
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, 'seek') and total_length is None:
                # StringIO and BytesIO have seek but no useable fileno
                try:
                    # seek to end of file
                    o.seek(0, 2)
                    total_length = o.tell()
                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except (OSError, IOError):
                    total_length = 0
    if total_length is None:
        total_length = 0
    return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc.

    :param url: URL whose host is looked up in ``~/.netrc`` / ``~/_netrc``.
    :param raise_errors: re-raise netrc parse/permission errors instead of
        silently returning None.
    """
    try:
        from netrc import netrc, NetrcParseError
        netrc_path = None
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/requests/requests/issues/1846
                return
            if os.path.exists(loc):
                netrc_path = loc
                break
        # Abort early if there isn't one.
        if netrc_path is None:
            return
        ri = urlparse(url)
        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b':'
        if isinstance(url, str):
            splitstr = splitstr.decode('ascii')
        host = ri.netloc.split(splitstr)[0]
        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise
    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object.

    Returns the basename of ``obj.name`` when it is a real path-like
    string; pseudo-names such as ``<stdin>`` yield None.
    """
    name = getattr(obj, 'name', None)
    if not name or not isinstance(name, basestring):
        return None
    if name.startswith('<') or name.endswith('>'):
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Build an ``OrderedDict`` from a dict-representable object.

    ::
        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None
    # Scalars can never be coerced into key/value pairs.
    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')
    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,
    ::
        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.
    :rtype: list
    """
    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')
    # BUG FIX: `collections.Mapping` was removed in Python 3.10; the ABCs
    # live in `collections.abc` since 3.3. Keep a Python 2 fallback.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    if isinstance(value, Mapping):
        value = value.items()
    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    Parses comma-separated lists whose elements may be quoted-strings
    (which may themselves contain commas); surrounding quotes are removed
    after parsing. Unlike :func:`parse_set_header`, duplicates are kept
    and case is preserved.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To build a header back from the list, use :func:`dump_header`.

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    items = []
    for element in _parse_list_header(value):
        if element[:1] == element[-1:] == '"':
            element = unquote_header_value(element[1:-1])
        items.append(element)
    return items
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2
    and convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    A key with no value maps to `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To build a header back from the dict, use :func:`dump_header`.

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    result = {}
    for item in _parse_list_header(value):
        name, sep, raw_value = item.partition('=')
        if not sep:
            # Bare token -- no '=' at all.
            result[item] = None
            continue
        if raw_value[:1] == raw_value[-1:] == '"':
            raw_value = unquote_header_value(raw_value[1:-1])
        result[name] = raw_value
    return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).

    This mimics what browsers actually do rather than strict RFC
    unquoting -- IE, for example, uploads files with "C:\foo\bar.txt" as
    the filename.

    :param value: the header value to unquote.
    :rtype: str
    """
    if not (value and value[0] == value[-1] == '"'):
        return value
    inner = value[1:-1]
    # A filename that looks like a UNC path (\\server\share) keeps its
    # backslashes verbatim; collapsing them would break the leading double
    # slash and _fix_ie_filename(). See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    # Later cookies with the same name overwrite earlier ones, exactly as
    # an explicit loop over the jar would.
    return {cookie.name: cookie.value for cookie in cj}
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """
    # Delegate entirely to cookies.cookiejar_from_dict, which mutates and
    # returns the jar.
    updated_jar = cookiejar_from_dict(cookie_dict, cj)
    return updated_jar
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    Scans for charset declarations in <meta charset=...>, HTTP-equiv
    pragma <meta content="...charset=...">, and the XML prolog, in that
    order.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)
    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :rtype: str
    """
    content_type = headers.get('content-type')
    if not content_type:
        return None
    # Split e.g. 'text/html; charset=utf-8' into the media type and params.
    content_type, params = cgi.parse_header(content_type)
    if 'charset' in params:
        # Strip quote characters some servers wrap around the charset.
        return params['charset'].strip("'\"")
    # HTTP/1.1 (RFC 2616) defines ISO-8859-1 as the default charset for
    # text/* media types, so fall back to it when no charset is declared.
    if 'text' in content_type:
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Lazily decode the byte chunks of *iterator* using the encoding of
    response *r*; when no encoding is declared, yield the raw chunks."""
    if r.encoding is None:
        for raw_chunk in iterator:
            yield raw_chunk
        return

    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for raw_chunk in iterator:
        decoded = decoder.decode(raw_chunk)
        if decoded:
            yield decoded
    # Flush whatever the incremental decoder still buffers.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Yield successive chunks of *string*, each at most *slice_length* long.

    A ``None`` or non-positive *slice_length* yields the whole string as a
    single slice.
    """
    if slice_length is None or slice_length <= 0:
        slice_length = len(string)
    if not string:
        return
    for start in range(0, len(string), slice_length):
        yield string[start:start + slice_length]
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)
    tried_encodings = []
    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)
    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)
    # Fall back: force-decode, replacing undecodable characters.  When no
    # encoding was found at all, str(content, None, ...) raises TypeError
    # and the raw bytes are returned unchanged.
    try:
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986); percent-escapes of these are
# always safe to decode.
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :rtype: str
    """
    parts = uri.split('%')
    decoded = parts[:1]
    for part in parts[1:]:
        hex_pair = part[0:2]
        if len(hex_pair) != 2 or not hex_pair.isalnum():
            # Not a full two-character escape; keep the '%' literally.
            decoded.append('%' + part)
            continue
        try:
            char = chr(int(hex_pair, 16))
        except ValueError:
            raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
        if char in UNRESERVED_SET:
            decoded.append(char + part[2:])
        else:
            decoded.append('%' + part)
    return ''.join(decoded)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.

    :rtype: str
    """
    try:
        unquoted = unquote_unreserved(uri)
    except InvalidURL:
        # The URI contains '%' signs that are not valid escapes; quote them
        # too, which is why '%' is absent from this safe set.
        return quote(uri, safe="!#$&'()*+,/:;=?@[]~")
    # Quote only illegal characters -- reserved, unreserved and '%' are all
    # left untouched.
    return quote(unquoted, safe="!#$%&'()*+,/:;=?@[]~")
def address_in_network(ip, net):
    """This function allows you to check if an IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    netaddr, bits = net.split('/')
    netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
    ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
    network = struct.unpack('=L', socket.inet_aton(netaddr))[0]
    # Both addresses, masked down to the prefix, must be the same network.
    return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
    """Convert a prefix length (/xx) into dotted-quad notation.

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    host_bits = 32 - mask
    # Keep the top `mask` bits set, clear the host bits.
    bits = (0xffffffff >> host_bits) << host_bits
    return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
    """Report whether *string_ip* parses as an IPv4 address.

    :rtype: bool
    """
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """
    Very simple check of the cidr format in no_proxy variable.

    :rtype: bool
    """
    if string_network.count('/') != 1:
        return False
    address, _, mask_text = string_network.partition('/')
    try:
        mask = int(mask_text)
    except ValueError:
        return False
    if not 1 <= mask <= 32:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
@contextlib.contextmanager
def set_environ(env_name, value):
    """Temporarily set the environment variable *env_name* to *value*.

    The previous value (or absence) is restored on exit.  When *value* is
    ``None``, the environment is left completely untouched.
    """
    if value is None:
        yield
        return

    previous = os.environ.get(env_name)
    os.environ[env_name] = value
    try:
        yield
    finally:
        if previous is None:
            del os.environ[env_name]
        else:
            os.environ[env_name] = previous
def should_bypass_proxies(url, no_proxy):
    """
    Returns whether we should bypass proxies or not.

    :param url: the URL being requested.
    :param no_proxy: explicit comma-separated no_proxy value; when ``None``
        the value is read from the environment (lower- or upper-case).
    :rtype: bool
    """
    # Look the key up in both lower- and upper-case environment variables.
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy_arg = no_proxy
    if no_proxy is None:
        no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc
    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = (
            host for host in no_proxy.replace(' ', '').split(',') if host
        )
        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            # IP literal: compare against CIDR blocks or plain IP entries.
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return True
                elif ip == proxy_ip:
                    # If no_proxy ip was defined in plain IP notation instead of cidr notation &
                    # matches the IP of the index
                    return True
        else:
            # Hostname: suffix match against each no_proxy entry, with and
            # without the port.
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True
    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    with set_environ('no_proxy', no_proxy_arg):
        try:
            bypass = proxy_bypass(netloc)
        except (TypeError, socket.gaierror):
            bypass = False
    if bypass:
        return True
    return False
def get_environ_proxies(url, no_proxy=None):
    """
    Return a dict of environment proxies.

    :rtype: dict
    """
    # An URL on the bypass list gets no proxies at all.
    if should_bypass_proxies(url, no_proxy=no_proxy):
        return {}
    return getproxies()
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    urlparts = urlparse(url)

    if urlparts.hostname is None:
        return proxies.get(urlparts.scheme, proxies.get('all'))

    # Most specific key first: scheme+host, then scheme, then catch-alls.
    candidate_keys = (
        urlparts.scheme + '://' + urlparts.hostname,
        urlparts.scheme,
        'all://' + urlparts.hostname,
        'all',
    )
    for key in candidate_keys:
        if key in proxies:
            return proxies[key]
    return None
def default_user_agent(name="python-requests"):
    """
    Return a string representing the default user agent.

    :rtype: str
    """
    return '{0}/{1}'.format(name, __version__)
def default_headers():
    """Build the default set of request headers.

    :rtype: requests.structures.CaseInsensitiveDict
    """
    headers = {
        'User-Agent': default_user_agent(),
        'Accept-Encoding': ', '.join(('gzip', 'deflate')),
        'Accept': '*/*',
        'Connection': 'keep-alive',
    }
    return CaseInsensitiveDict(headers)
def parse_header_links(value):
    """Return a dict of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :param value: the raw Link header value.
    :rtype: list
    """
    links = []
    replace_chars = ' \'"'

    # Bug fix: an empty (or all-space/quote) header used to produce a bogus
    # [{'url': ''}] entry; strip first and bail out early instead.
    value = value.strip(replace_chars)
    if not value:
        return links

    for val in re.split(', *<', value):
        try:
            url, params = val.split(';', 1)
        except ValueError:
            # No parameters on this link.
            url, params = val, ''

        link = {'url': url.strip('<> \'"')}

        for param in params.split(';'):
            try:
                key, value = param.split('=')
            except ValueError:
                break
            link[key.strip(replace_chars)] = value.strip(replace_chars)

        links.append(link)

    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the unicode encoding of a JSON byte string.

    :rtype: str
    """
    # JSON always starts with two ASCII characters, so the encoding can be
    # deduced by finding a BOM or by counting where the NUL padding bytes
    # fall within the first four bytes.
    head = data[:4]

    if head in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if head[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if head[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included

    zeros = head.count(_null)
    if zeros == 0:
        return 'utf-8'
    if zeros == 2:
        if head[::2] == _null2:   # 1st and 3rd bytes are null
            return 'utf-16-be'
        if head[1::2] == _null2:  # 2nd and 4th bytes are null
            return 'utf-16-le'
        # Not two valid UTF-16 ascii-range characters.
    elif zeros == 3:
        if head[:3] == _null3:
            return 'utf-32-be'
        if head[1:] == _null3:
            return 'utf-32-le'
        # Not a valid UTF-32 ascii-range character.
    return None
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.

    Does not replace a present scheme with the one provided as an argument.

    :rtype: str
    """
    scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)

    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and switch netloc
    # and path if urlparse decided there was no netloc.
    if not netloc:
        netloc, path = path, netloc

    return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
    """Extract the (username, password) pair from a URL, percent-decoded.

    :rtype: (str,str)
    """
    parsed = urlparse(url)

    try:
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        # No auth component present (username/password are None).
        return ('', '')
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')


def check_header_validity(header):
    """Verifies that header value is a string which doesn't contain
    leading whitespace or return characters. This prevents unintended
    header injection.

    :param header: tuple, in the format (name, value).
    """
    name, value = header
    pat = _CLEAN_HEADER_REGEX_BYTE if isinstance(value, bytes) else _CLEAN_HEADER_REGEX_STR

    try:
        if not pat.match(value):
            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
    except TypeError:
        # pat.match() rejects anything that isn't str/bytes.
        raise InvalidHeader("Header value %s must be of type str or bytes, "
                            "not %s" % (value, type(value)))
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part.

    :rtype: str
    """
    scheme, netloc, path, params, query, fragment = urlparse(url)

    # see func:`prepend_scheme_if_needed`
    if not netloc:
        netloc, path = path, netloc

    # Drop everything up to and including the last '@' of the netloc.
    netloc = netloc.rsplit('@', 1)[-1]

    return urlunparse((scheme, netloc, path, params, query, ''))
def rewind_body(prepared_request):
    """Move file pointer back to its recorded starting position
    so it can be read again on redirect.
    """
    body_seek = getattr(prepared_request.body, 'seek', None)
    # Both a seek() method and a recorded integer position are required.
    if body_seek is None or not isinstance(prepared_request._body_position, integer_types):
        raise UnrewindableBodyError("Unable to rewind request body for redirect.")
    try:
        body_seek(prepared_request._body_position)
    except (IOError, OSError):
        raise UnrewindableBodyError("An error occurred when rewinding request "
                                    "body for redirect.")
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
NLUlite is a high-level Natural Language Understanding framework.
This file is the client part of the framework (for Python > 3.3),
released with BSD license.
"""
__author__ = 'NLUlite'
__version__ = '0.1.12'
__license__ = 'BSD'
## Check the version.
# Bug fix: ``StandardError`` only exists in Python 2 -- on an unsupported
# Python 3 interpreter this raise died with a NameError instead of showing
# the intended message, so use the built-in RuntimeError instead.
import sys
if sys.version_info < (3, 3):
    raise RuntimeError('You must use python 3.3 or greater')
import socket, copy
import xml.etree.ElementTree as ET
import string, urllib3
from html.parser import HTMLParser
from xml.sax.saxutils import unescape
import os
class NLUliteHTMLParser(HTMLParser):
    """
    Helper class for Wisdom.add_url()

    Accumulates the text of every element except <script> and <img>.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.current_tag = ''
        self.all_text = ''

    def handle_starttag(self, tag, attrs):
        # Remember the most recently opened tag; handle_data uses it to
        # decide whether the text should be kept.
        self.current_tag = tag

    def handle_data(self, data):
        if self.current_tag not in ('script', 'img'):
            self.all_text += data

    def get_all_text(self):
        return self.all_text
class NLUliteWikiParser(HTMLParser):
    """
    Helper class for Wisdom.add_url()

    Keeps only text found inside <p> elements, separating paragraphs with
    newlines.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.current_tag = ''
        self.all_text = ''
        self.p_tag = False  # True while inside a <p> element

    def handle_starttag(self, tag, attrs):
        self.current_tag = tag
        if tag == 'p':
            self.p_tag = True

    def handle_endtag(self, tag):
        self.current_tag = tag
        if tag == 'p':
            self.p_tag = False
            self.all_text += '\n'

    def handle_data(self, data):
        if self.p_tag:
            self.all_text += data

    def get_all_text(self):
        return self.all_text
class HTMLTemplateFactory():
    """Pick the HTML parser best suited to a given URL."""

    def __init__(self):
        return

    def get(self, url):
        # Wikipedia pages get the paragraph-only parser; everything else
        # gets the generic text extractor.
        if 'wikipedia' in url:
            return NLUliteWikiParser()
        return NLUliteHTMLParser()
class NLUliteFeedParser(HTMLParser):
    """
    Helper class for Wisdom.add_url()

    Extracts <title> and <description> text from an RSS/Atom feed, marking
    each title with a '[% feed %]' prefix.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.current_tag = ''
        self.all_text = ''
        self.link = ''

    def handle_starttag(self, tag, attrs):
        self.current_tag = tag

    def handle_data(self, data):
        tag = self.current_tag
        if tag == 'title':
            # Bug fix: str.replace() returns a new string; the original code
            # discarded the result, so CDATA markers were never stripped.
            data = data.replace("<![CDATA[", " ")
            data = data.replace("]]>", " ")
            self.all_text += '[% feed %]'
            self.all_text += data + ' \r\n\r\n'
        if tag == 'description':
            self.all_text += data + ' \r\n\r\n'

    def get_all_text(self):
        return self.all_text
class FeedTemplateFactory():
    """Factory mirroring HTMLTemplateFactory, but for feed parsing."""

    def __init__(self):
        return

    def get(self, url):
        # Only one feed parser exists, so the URL is ignored.
        return NLUliteFeedParser()
class Rule:
    """
    Store one single rule item
    """

    def __init__(self):
        # Defaults: empty texts and a neutral weight.
        self.text = ''
        self.description = ''
        self.weight = 1
class QPair:
    """
    Store the query/reply combination in an answer

    :param query: the query text
    :param reply: the reply text
    """

    def __init__(self, query='', reply=''):
        self.query = query
        self.reply = reply
class AnswerElement:
    """
    Store one single answer item

    The attributes are filled in by process_query_reply(): the answer text,
    a description/link, the DRS representation, a weight, the query/reply
    pairs and the rules that produced the answer.
    """
    def __init__(self):
        self.text= ''
        self.description= ''
        self.drs= ''
        self.weight= 1
        self.pairs= []
        self.rules= []
        self.wisdom = ''
    def comment(self):
        """Render this element as a human-readable multi-line string."""
        # NOTE(review): this Writer is constructed (which registers a new
        # writer on the server) but never used below -- confirm whether it
        # can be removed.
        writer = Writer(self.wisdom)
        return_string = "\n"
        for pair in self.pairs:
            if pair.reply != None and pair.query != None:
                return_string += pair.query + ': ' + pair.reply + '\n'
        # Rule texts are joined with '+' between consecutive entries.
        for rule in self.rules:
            if rule.text == None:
                continue
            return_string += rule.text
            if rule != self.rules[-1]:
                return_string += '+'
        if self.text != None:
            return_string += self.text + "\n"
        return_string += "\n"
        return return_string
class Answer:
    """
    Store all the answer information

    Wraps the list of AnswerElement items parsed from a server reply,
    together with the question ID and the reply status string.
    """
    def __init__(self,wisdom):
        self.answer_elements= []
        self.question_ID= ''
        self.wisdom= wisdom
        self.status= ''
    def __sort__(self):
        # Order elements by ascending weight.
        self.answer_elements = sorted( self.answer_elements, key= lambda item : item.weight )
    def set_elements(self,answer_elements):
        self.answer_elements= answer_elements
    def set_question_ID(self,qID):
        self.question_ID= qID
    def elements(self,query=""):
        """Return the answer elements; when *query* is given, each returned
        element keeps only the pairs whose query contains that substring."""
        if query == "":
            return self.answer_elements
        else:
            return_list= []
            for item in self.answer_elements:
                new_item = copy.deepcopy(item)
                new_item.pairs = []
                for pair in item.pairs:
                    if pair.query.find(query) != -1:
                        new_item.pairs.append(pair)
                return_list.append(new_item)
            return return_list
    def is_positive(self):
        # Both 'yes' and 'list' statuses count as positive.
        if self.status.find('yes') != -1 or self.status.find('list') != -1:
            return True
        return False
    def is_negative(self):
        if self.status.find('no') != -1:
            return True
        return False
    def is_list(self):
        if self.status.find('list') != -1:
            return True
        return False
    def match(self,text):
        """Match each element's DRS against *text*; return the first
        positive answer from the server."""
        for item in self.answer_elements:
            drs= item.drs
            answer= self.wisdom.__match_drs_with_text__(drs,text)
            if answer.is_positive():
                return answer
        # NOTE(review): ret_answ is an alias of self, so a failed match
        # empties this Answer in place as a side effect -- confirm whether
        # that is intended.
        ret_answ= self
        ret_answ.answer_elements= []
        ret_answ.question_ID= ''
        ret_answ.status= ''
        return ret_answ
    def comment(self):
        # Delegate rendering to a server-side Writer.
        writer = Writer(self.wisdom)
        return writer.write(self)
    def join(self,rhs):
        """Merge *rhs* into this answer and re-sort the elements; the
        stronger (list/positive) status wins."""
        if self.is_positive() and rhs.is_list():
            self.status = rhs.status
            self.question_ID = rhs.question_ID
            self.wisdom = rhs.wisdom
        if self.is_negative() and rhs.is_positive():
            self.status = rhs.status
            self.question_ID = rhs.question_ID
            self.wisdom = rhs.wisdom
        self.answer_elements.extend(rhs.answer_elements)
        self.__sort__()
def join_answers(answer_list) :
    """
    Joins together and sorts a list of answers

    The first answer in the list absorbs all the others and is returned.
    """
    first, *rest = answer_list
    for other in rest:
        first.join(other)
    return first
class WisdomParameters:
    """
    Bag of tuning options passed to the server through
    ServerProxy.set_wisdom_parameters().  Boolean-like options are kept as
    the strings 'true'/'false' because they are spliced verbatim into the
    protocol message.
    """
    def __init__(self):
        # Answer selection / accuracy.
        self.num_answers = 10
        self.accuracy_level = 5
        self.solver_options = ''
        self.skip_presuppositions = ''
        self.skip_solver = 'false'
        self.do_solver = 'false'
        self.add_data = 'true'
        # Word-matching behaviour.
        self.word_intersection = 'true'
        self.use_pertaynims = 'true'
        self.use_synonyms = 'false'
        self.use_hyponyms = 'true'
        self.num_hyponyms = 2
        # Time and search-space limits.
        self.timeout = 10
        self.fixed_time = 6
        self.max_refs = 3000000
        self.max_candidates_refs = 50
        self.max_candidates = 50
        self.load_clauses = 'true'
        self.implicit_verb = 'false'
    def set_num_answers(self, num):
        self.num_answers = num
    def set_accuracy_level(self, num):
        self.accuracy_level = num
    def set_solver_options(self, options):
        self.solver_options = options
    def set_skip_presuppositions(self, options):
        self.skip_presuppositions = options
    def set_skip_solver(self, options):
        self.skip_solver = options
    def set_do_solver(self, options):
        self.do_solver = options
    def set_add_data(self, options):
        self.add_data = options
    def set_timeout(self, options):
        self.timeout = options
    def set_fixed_time(self, options):
        self.fixed_time = options
    def set_word_intersection(self, options):
        self.word_intersection = options
    def set_use_pertaynims(self, options):
        self.use_pertaynims = options
    def set_max_refs(self, options):
        self.max_refs = options
    def set_max_candidates_refs(self, options):
        self.max_candidates_refs = options
    def set_max_candidates(self, options):
        self.max_candidates = options
    def set_use_synonyms(self, options):
        self.use_synonyms = options
    def set_use_hyponyms(self, options):
        self.use_hyponyms = options
    def set_num_hyponyms(self, options):
        self.num_hyponyms = options
    def set_load_clauses(self, options):
        self.load_clauses = options
    def set_implicit_verb(self, options):
        self.implicit_verb = options
    def get_num_answers(self):
        return self.num_answers
    def get_accuracy_level(self):
        return self.accuracy_level
    def get_solver_options(self):
        return self.solver_options
    def get_skip_presuppositions(self):
        return self.skip_presuppositions
    def get_skip_solver(self):
        return self.skip_solver
    def get_do_solver(self):
        return self.do_solver
    def get_add_data(self):
        return self.add_data
    def get_timeout(self):
        return self.timeout
    def get_fixed_time(self):
        return self.fixed_time
    def get_word_intersection(self):
        return self.word_intersection
    def get_use_pertaynims(self):
        return self.use_pertaynims
    def get_max_refs(self):
        return self.max_refs
    def get_max_candidates_refs(self):
        return self.max_candidates_refs
    def get_max_candidates(self):
        return self.max_candidates
    def get_use_synonyms(self):
        return self.use_synonyms
    def get_use_hyponyms(self):
        return self.use_hyponyms
    def get_num_hyponyms(self):
        return self.num_hyponyms
    def get_load_clauses(self):
        return self.load_clauses
    def get_implicit_verb(self):
        return self.implicit_verb
def process_query_reply(wisdom, reply):
    """
    Auxiliary function for the classes Wisdom and Wikidata.
    It processes the reply from the server.

    :param wisdom: the Wisdom (or Wikidata) instance the reply belongs to
    :param reply: the raw XML string returned by the server
    :return: an Answer built from the parsed reply
    """
    answer_elements = []
    if reply == "":
        # Bug fix: this used to be ``Answer(self)``, but ``self`` does not
        # exist in a module-level function and raised a NameError.
        return Answer(wisdom)
    try:
        root = ET.fromstring(reply)
    except ET.ParseError:
        # If the answer is not well-formed, choose a default answer
        answer = Answer(wisdom)
        answer.set_question_ID(wisdom.ID + ':no_answer:' + str(len(answer_elements)))
        answer.status = ''
        return answer
    qID = status = ''
    for child in root:
        if child.tag == 'qID':
            qID = child.text
            continue
        if child.tag == 'status':
            status = child.text
            continue
        # Any other child is an answer element.
        text = ''
        link = ''
        drs = ''
        weight = 1
        pairs = []
        rules = []
        for c2 in child:
            if c2.tag == 'text':
                text = c2.text
            if c2.tag == 'link':
                link = c2.text
            if c2.tag == 'drs':
                drs = c2.text
            if c2.tag == 'weight':
                weight = c2.text
            if c2.tag == 'data':
                for c3 in c2:  # <dataitem>
                    WP = name = ''
                    for c4 in c3:
                        if c4.tag == 'WP':
                            WP = c4.text
                        if c4.tag == 'name':
                            name = c4.text
                    pairs.append(QPair(WP, name))
            if c2.tag == 'rules':
                for c3 in c2:  # <ruleitem>
                    rule = Rule()
                    for c4 in c3:
                        if c4.tag == 'text':
                            rule.text = c4.text
                        if c4.tag == 'link':
                            rule.description = c4.text
                    rules.append(rule)
        answ = AnswerElement()
        answ.text = text
        answ.description = link
        answ.drs = drs
        answ.weight = weight
        answ.pairs = pairs
        answ.rules = rules
        answ.wisdom = wisdom
        answer_elements.append(answ)
    answer = Answer(wisdom)
    answer.set_elements(answer_elements)
    answer.set_question_ID(wisdom.ID + ':' + qID.strip() + ':' + str(len(answer_elements)))
    answer.status = status
    return answer
class Wisdom:
    """
    Process the wisdom

    Client-side handle to one knowledge base ('wisdom') living on the
    NLUlite server; all operations are delegated to a ServerProxy.
    """
    def __init__(self, server):
        if not isinstance(server, ServerProxy):
            raise TypeError('The server attribute must be set to an instance of NLUlite.ServerProxy')
        self.server= server
        # Ask the server to allocate a fresh wisdom and remember its ID.
        self.ID= self.server.get_new_ID()
    def __match_drs_with_text__(self,drs,question):
        """Match a DRS against *question* on the server."""
        reply = self.server.match_drs(drs,question,self.ID)
        answer= process_query_reply(self, reply)
        return answer
    def add(self, text):
        """Feed raw text into this wisdom."""
        reply = self.server.add_data(text, self.ID);
    def add_file(self, filename):
        """Feed the contents of a local file into this wisdom."""
        filename = os.path.expanduser(filename)
        # NOTE(review): the file handle is never closed explicitly.
        text = open(filename, 'r').read()
        reply = self.server.add_data(text, self.ID);
    def add_url(self, url):
        """Fetch *url*, extract its text and feed it into this wisdom."""
        http = urllib3.PoolManager()
        req = http.request('GET',url)
        if(req.status != 200):
            raise RuntimeError('The page was not found')
        page= str( req.data )
        parser = HTMLTemplateFactory().get(url)
        parser.feed( page )
        webtext = parser.get_all_text()
        # Prefix the text with its source URL in '[% ... %]' form.
        webtext = '[% '+url+' %]\n' + webtext
        self.add(webtext)
    def add_feed(self, url):
        """Fetch an RSS/Atom feed and feed its text into this wisdom."""
        http = urllib3.PoolManager()
        req = http.request('GET',url)
        page= str(req.data)
        page = unescape( page )
        feeder = FeedTemplateFactory().get(url)
        feeder.feed(page)
        text = feeder.get_all_text()
        text = '[% '+url+' %]\n' + text
        self.add(text)
    def save(self, filename):
        """Save the server-side wisdom into a local file."""
        filename = os.path.expanduser(filename)
        reply = self.server.save_wisdom(self.ID);
        f= open(filename, "w")
        f.write(reply);
        f.close();
    def save_rdf(self, filename):
        """Save the wisdom as RDF into a local file."""
        filename = os.path.expanduser(filename)
        reply = self.server.save_rdf(self.ID);
        f= open(filename, "w")
        f.write(reply);
        f.close();
    def save_string(self):
        """Return the serialized wisdom as a string."""
        reply = self.server.save_wisdom(self.ID);
        return reply
    def load(self, filename):
        """Load a previously saved wisdom from a local file."""
        filename = os.path.expanduser(filename)
        f= open(filename, "r")
        data= f.read()
        f.close();
        reply = self.server.load_wisdom(data, self.ID);
    def load_string(self, string):
        """Load a previously saved wisdom from a string."""
        data= string
        reply = self.server.load_wisdom(data, self.ID);
    def ask(self, question):
        """Ask a natural-language question; returns an Answer."""
        reply = self.server.query(question, self.ID)
        answer = process_query_reply(self, reply)
        return answer
    def match(self, question):
        """Match *question* against the wisdom; returns an Answer."""
        reply = self.server.match(question, self.ID)
        answer= process_query_reply(self, reply)
        return answer
    def export_to_server(self,key,password="",timer=-1):
        """Publish this wisdom on the server under *key*."""
        reply = self.server.send_to_publish(self.ID, key, password,timer);
        if(reply == "<error>"):
            raise RuntimeError('Cannot publish wisdom: The key ' + key + ' is already in use.')
    def import_from_server(self,key):
        """Replace this wisdom with the published one stored under *key*."""
        reply = self.server.get_from_published(self.ID, key);
        if(reply == "<error>"):
            raise RuntimeError('Cannot retrieve wisdom: The key ' + key + ' does not exist')
        self.ID= reply; # This function erases the wisdom when successful
    def clear(self):
        """Empty the server-side wisdom."""
        reply = self.server.clear_wisdom(self.ID);
        if(reply == "<error>"):
            raise RuntimeError('Cannot clear wisdom: The Wisdom.ID ' + self.ID + ' does not exist')
    def set_wisdom_parameters(self, wp):
        """Push a WisdomParameters configuration to the server."""
        if not isinstance(wp, WisdomParameters):
            raise TypeError('The wisdom.set_wisdom_parameters attribute must be set to an instance of NLUlite.WisdomParameters')
        self.server.set_wisdom_parameters(self.ID, wp);
class Writer:
    """
    Writer class

    Renders answers (or single answer elements) into text by delegating to
    a server-side writer object scoped to one wisdom.
    """
    def __init__(self, wisdom):
        if not isinstance(wisdom, Wisdom) and not isinstance(wisdom, Wikidata):
            raise TypeError('The wisdom attribute must be set to an instance of NLUlite.Wisdom or NLUlite.wikidata')
        self.server = wisdom.server
        # Register a new writer on the server; its ID scopes later calls.
        reply= self.server.get_new_writer_ID(wisdom.ID)
        self.ID= reply
    def __del__(self):
        # Best-effort cleanup of the server-side writer.
        self.server.writer_erase(self.ID);
    def write(self, answer):
        """Return the textual rendering of an Answer or AnswerElement."""
        if isinstance(answer, AnswerElement):
            reply= self.server.writer_write(self.ID, answer.drs)
            return reply
        if isinstance(answer, Answer):
            reply= self.server.writer_write_answer(self.ID, answer.question_ID)
            return reply
        raise TypeError('The answer attribute must be set to an instance of NLUlite.Anwer or NLUlite.AnswerElement')
class ServerProxy:
"""
Server class
"""
def __init__(self, ip= "localhost", port= 4001):
self.ip = ip
self.port = port
self.wisdom_list= []
self.published_list= []
reply= self.__send('<test>\n<eof>')
if reply != '<ok>':
raise RuntimeError('No valid server seems to be running.')
def __del__(self):
for item in self.wisdom_list:
if(item not in self.published_list):
self.erase(item)
def add_data(self, data, ID):
text = '<data ID=' + ID + '>'
text += data
text += '<eof>'
reply = self.__send(text)
return reply
def save_wisdom(self, ID):
text = '<save ID=' + ID + '>'
text += '<eof>'
reply = self.__send(text)
return reply
def save_rdf(self, ID):
text = '<save_rdf ID=' + ID + '>'
text += '<eof>'
reply = self.__send(text)
return reply
def load_wisdom(self, data, ID):
text = '<load ID=' + ID + '>'
text += data
text += '<eof>'
reply = self.__send(text)
return reply
def query(self, data, ID):
text = '<question ID=' + ID + '>'
text += data
text += '<eof>'
reply = self.__send(text)
return reply
def wikidata_query(self, data, ID):
text = '<wikidata_question ID=' + ID + '>'
text += data
text += '<eof>'
reply = self.__send(text)
return reply
def match(self, data, ID):
text = '<match ID=' + ID + '>'
text += data
text += '<eof>'
reply = self.__send(text)
return reply
def match_drs(self, drs, question, ID):
text = '<match_drs ID=' + ID + '>'
text += drs
text += ";"
text += question
text += '<eof>'
reply = self.__send(text)
return reply
def erase(self, ID):
text = '<erase ID=' + ID + '>'
text += '<eof>'
reply = self.__send(text)
return reply
def get_new_ID(self):
text = '<new_wisdom>\n'
text += '<eof>'
ID = self.__send(text)
self.wisdom_list.append(ID)
return ID
def get_new_wikidata_ID(self):
text = '<new_wikidata>\n'
text += '<eof>'
ID = self.__send(text)
self.wisdom_list.append(ID)
return ID
def get_new_writer_ID(self, wisdom_ID):
text = '<writer_new ID=' + wisdom_ID + '>'
text += '<eof>'
ID = self.__send(text)
return ID
def writer_erase(self, writer_ID):
text = '<writer_erase ID=' + writer_ID + '>'
text += '<eof>'
ID = self.__send(text)
return ID
def writer_write(self, writer_ID, drs):
text = '<writer_write ID=' + writer_ID + '>'
text += drs
text += '<eof>'
ID = self.__send(text)
return ID
def writer_write_answer(self, writer_ID, qID):
text = '<writer_write_answer ID=' + writer_ID + '>'
text += qID.rstrip().lstrip()
text += '<eof>'
ID = self.__send(text)
return ID
def send_to_publish(self, ID, publish_key, password, timer):
text = '<publish ID=' + ID + ' key=' + publish_key + ' passwd=' + password + ' timer=' + str(timer) + '>'
text += '<eof>'
reply = self.__send(text)
if(reply != "<error>"):
self.published_list.append(ID);
return reply
def get_from_published(self, ID, publish_key):
text = '<get_published ID=' + ID + ' key=' + publish_key + '>'
text += '<eof>'
reply = self.__send(text)
return reply
def clear_wisdom(self, ID):
text = '<erase_wisdom ID=' + ID + '>'
text += '<eof>'
reply = self.__send(text)
return reply
def set_wisdom_parameters(self, ID, wp):
accuracy_level = wp.get_accuracy_level()
num_answers = wp.get_num_answers()
solver_options = wp.get_solver_options()
skip_presuppositions = wp.get_skip_presuppositions()
skip_solver = wp.get_skip_solver()
do_solver = wp.get_do_solver()
add_data = wp.get_add_data()
timeout = wp.get_timeout()
fixed_time = wp.get_fixed_time()
max_refs = wp.get_max_refs()
max_candidates_refs = wp.get_max_candidates_refs()
max_candidates = wp.get_max_candidates()
word_intersection = wp.get_word_intersection()
use_pertaynims = wp.get_use_pertaynims()
use_synonyms = wp.get_use_synonyms()
use_hyponyms = wp.get_use_hyponyms()
num_hyponyms = wp.get_num_hyponyms()
load_clauses = wp.get_load_clauses()
implicit_verb = wp.get_implicit_verb()
text = ('<wisdom_parameters '
+ ' accuracy_level=' + str(accuracy_level)
+ ' num_answers=' + str(num_answers)
+ ' solver_options=' + solver_options
+ ' skip_presuppositions=' + skip_presuppositions
+ ' skip_solver=' + skip_solver
+ ' do_solver=' + do_solver
+ ' add_data=' + add_data
+ ' ID=' + ID
+ ' timeout=' + str(timeout)
+ ' fixed_time=' + str(fixed_time)
+ ' max_refs=' + str(max_refs)
+ ' max_candidates_refs=' + str(max_candidates_refs)
+ ' max_candidates=' + str(max_candidates)
+ ' word_intersection=' + word_intersection
+ ' use_pertaynims=' + use_pertaynims
+ ' use_synonyms=' + use_synonyms
+ ' use_hyponyms=' + use_hyponyms
+ ' num_hyponyms=' + str(num_hyponyms)
+ ' implicit_verb=' + implicit_verb
+ '>'
)
text += '<eof>'
reply = self.__send(text)
return reply
def erase_exported(self, publish_key, password= ""):
text = '<erase_published' + ' key=' + publish_key + ' passwd=' + password + '>'
text += '<eof>'
reply = self.__send(text)
if reply == "<error>":
raise RuntimeError('Erasing published Wisdom: wrong key or password.')
return
def list_exported(self):
text = '<list_published>'
text += '<eof>'
reply = self.__send(text)
plist= []
root= ET.fromstring(reply)
for item in root:
plist.append(item.text)
return plist
def get_new_ID(self):
text = '<new_wisdom>\n'
text += '<eof>'
ID = self.__send(text)
self.wisdom_list.append(ID)
return ID
def set_num_threads(self, num_threads):
text = '<server threads=' + str(num_threads) +'>\n'
text += '<eof>'
ID = self.__send(text)
self.wisdom_list.append(ID)
return ID
def __send(self, text):
"""
Helper function for sending information on a socket. Send the 'text'
and return the 'answer'.
"""
sock= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect( (self.ip,self.port) )
# Send the question to the server
totalsent= 0
while totalsent < len(text):
to_send= text[totalsent:].encode('utf-8')
sent= sock.send(to_send)
totalsent += sent
# Receive the answer
answer= answerold= ''
CHUNKLEN= 256
while len(answerold) <= len(answer):
answerold = answer
chunk= sock.recv(CHUNKLEN)
if chunk == b'':
break
answer += chunk.decode('utf-8')
return answer
class Match:
    """
    Binds a text to a python function
    """

    def __init__(self, text):
        self.text = text
        self.function_list = []

    def __execute__(self, argument):
        # The argument can be a Wisdom or an Answer (they both have the
        # method match()).
        answer = argument.match(self.text)
        if answer.is_positive():
            for callback in self.function_list:
                callback(answer)

    def bind(self, function):
        """Register *function* to be called with every positive answer."""
        self.function_list.append(function)
class Commands:
    """
    Manages the command list
    """

    def __init__(self, argument):
        if not isinstance(argument, Wisdom) and not isinstance(argument, Answer):
            raise TypeError('The argument in Commands() must be set to an instance of NLUlite.Wisdom or NLUlite.Answer')
        self.wisdom = argument
        self.match_list = []

    def parse(self, argument):
        # Replace the wisdom/answer the commands operate on.
        self.wisdom = argument

    def add(self, match):
        if not isinstance(match, Match):
            raise TypeError('The match attribute in Commands.add() must be set to an instance of NLUlite.Match')
        self.match_list.append(match)

    def execute(self):
        # Run every registered Match against the stored wisdom/answer.
        for match in self.match_list:
            match.__execute__(self.wisdom)
class Wikidata:
    """
    Answer the question through a query to Wikidata.
    It connects to the NLUlite server to transform natural language
    into a Wikidata query.
    """

    def __init__(self, server):
        if not isinstance(server, ServerProxy):
            raise TypeError('The server attribute must be set to an instance of NLUlite.ServerProxy')
        self.server = server
        reply = self.server.get_new_wikidata_ID()
        if reply == "<error>":
            raise TypeError('You must start the server with the --wikidata option')
        self.ID = reply

    def ask(self, question):
        """Ask a natural-language question against Wikidata."""
        reply = self.server.wikidata_query(question, self.ID)
        return process_query_reply(self, reply)
| |
"""
API views
"""
import hashlib
import itertools
import json
import random
import urllib
from datetime import date, timedelta
from django.core.cache import cache
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import render
from django.template.context import get_standard_processors
from django.utils import encoding, translation
from django.utils.encoding import smart_str
from django.views.decorators.csrf import csrf_exempt
import commonware.log
import jingo
import waffle
from caching.base import cached_with
from piston.utils import rc
from tower import ugettext as _, ugettext_lazy
import amo
import api
from addons.models import Addon, CompatOverride
from amo.decorators import post_required, allow_cross_site_request, json_view
from amo.models import manual_order
from amo.urlresolvers import get_url_prefix
from amo.utils import JSONEncoder
from api.authentication import AMOOAuthAuthentication
from api.forms import PerformanceForm
from api.utils import addon_to_dict, extract_filters
from perf.models import (Performance, PerformanceAppVersions,
PerformanceOSVersion)
from search.views import (AddonSuggestionsAjax, PersonaSuggestionsAjax,
name_query)
from versions.compare import version_int
# Error level used in api/message.xml responses.
ERROR = 'error'

# Message shown when a client uses an API version outside the supported range.
OUT_OF_DATE = ugettext_lazy(
    u"The API version, {0:.1f}, you are using is not valid. "
    u"Please upgrade to the current version {1:.1f} API.")

# Add-on statuses that are allowed to appear in guid_search results.
SEARCHABLE_STATUSES = (amo.STATUS_PUBLIC, amo.STATUS_LITE,
                       amo.STATUS_LITE_AND_NOMINATED)

# Dedicated jinja environment for XML output: every rendered value is run
# through strip_controls, since control characters are illegal in XML.
xml_env = jingo.env.overlay()
old_finalize = xml_env.finalize
xml_env.finalize = lambda x: amo.helpers.strip_controls(old_finalize(x))

# Hard limit of 30. The buffer is to try for locale-specific add-ons.
MAX_LIMIT, BUFFER = 30, 10

# "New" is arbitrarily defined as 10 days old.
NEW_DAYS = 10

log = commonware.log.getLogger('z.api')
def partition(seq, key):
    """Split *seq* into (key_value, items) buckets grouped by key(x).

    Returns a generator of pairs; buckets appear in sorted key order.
    """
    ordered = sorted(seq, key=key)
    return ((bucket, list(members))
            for bucket, members in itertools.groupby(ordered, key=key))
def render_xml_to_string(request, template, context=None):
    """Render XML *template* to a string with the control-char-stripping env.

    :param request: incoming Django request; run through the standard
        context processors to populate the template context.
    :param template: template path resolvable by `xml_env`.
    :param context: optional dict of extra template variables.

    BUG FIX: this previously used a mutable default ``context={}`` and then
    mutated it via ``context.update(...)``, so context-processor output
    leaked into the shared default dict across calls.  A fresh dict is now
    created per call when the caller passes nothing.
    """
    if context is None:
        context = {}
    if not jingo._helpers_loaded:
        jingo.load_helpers()

    for processor in get_standard_processors():
        context.update(processor(request))

    template = xml_env.get_template(template)
    return template.render(context)
def render_xml(request, template, context=None, **kwargs):
    """Safely renders xml, stripping out nasty control characters.

    Extra keyword arguments are forwarded to HttpResponse; `mimetype`
    defaults to 'text/xml'.

    BUG FIX: the default was a shared mutable ``context={}`` that the
    downstream renderer mutates, leaking state between calls.  The default
    is now None and a fresh dict is created per call.
    """
    if context is None:
        context = {}
    rendered = render_xml_to_string(request, template, context)
    kwargs.setdefault('mimetype', 'text/xml')
    return HttpResponse(rendered, **kwargs)
def handler403(request):
    """403 handler: render a 'Not allowed' XML error message."""
    return render_xml(request, 'api/message.xml',
                      {'error_level': ERROR, 'msg': 'Not allowed'},
                      status=403)
def handler404(request):
    """404 handler: render a 'Not Found' XML error message."""
    return render_xml(request, 'api/message.xml',
                      {'error_level': ERROR, 'msg': 'Not Found'},
                      status=404)
def handler500(request):
    """500 handler: render a 'Server Error' XML error message."""
    return render_xml(request, 'api/message.xml',
                      {'error_level': ERROR, 'msg': 'Server Error'},
                      status=500)
def validate_api_version(version):
    """
    We want to be able to deprecate old versions of the API, therefore we
    check that the requested version lies inside [MIN_VERSION, MAX_VERSION]
    before continuing.  Returns True when the version is acceptable.
    """
    return api.MIN_VERSION <= float(version) <= api.MAX_VERSION
def addon_filter(addons, addon_type, limit, app, platform, version,
                 compat_mode='strict', shuffle=True):
    """
    Filter addons by type, application, app version, and platform.

    Add-ons that support the current locale will be sorted to front of list.
    Shuffling will be applied to the add-ons supporting the locale and the
    others separately.

    Doing this in the database takes too long, so we do it in code and wrap
    it in generous caching.

    :param addons: sequence of Addon objects to filter.
    :param addon_type: 'ALL' or a numeric add-on type id (as a string).
    :param limit: maximum number of add-ons to return; 0 returns everything.
    :param app: application object the add-ons must be compatible with.
    :param platform: platform name, or 'all' for no platform filtering.
    :param version: app version string, or None to skip version filtering.
    :param compat_mode: 'strict', 'ignore' or 'normal'.
    :param shuffle: randomize order inside the locale/non-locale groups.
    """
    # Keep the original app around: the name `app` is reused in the loop
    # below to mean "the addon's compat info for this application".
    APP = app
    if addon_type.upper() != 'ALL':
        try:
            addon_type = int(addon_type)
            if addon_type:
                addons = [a for a in addons if a.type == addon_type]
        except ValueError:
            # `addon_type` is ALL or a type id. Otherwise we ignore it.
            pass

    # Take out personas since they don't have versions.
    groups = dict(partition(addons,
                            lambda x: x.type == amo.ADDON_PERSONA))
    personas, addons = groups.get(True, []), groups.get(False, [])

    platform = platform.lower()
    if platform != 'all' and platform in amo.PLATFORM_DICT:
        def f(ps):
            # NOTE: `pid` is assigned after this def; relies on Python's
            # late binding of closure variables.
            return pid in ps or amo.PLATFORM_ALL in ps

        pid = amo.PLATFORM_DICT[platform]
        addons = [a for a in addons
                  if f(a.current_version.supported_platforms)]

    if version is not None:
        vint = version_int(version)

        def f_strict(app):
            return app.min.version_int <= vint <= app.max.version_int

        def f_ignore(app):
            return app.min.version_int <= vint

        xs = [(a, a.compatible_apps) for a in addons]
        # Iterate over addons, checking compatibility depending on compat_mode.
        addons = []
        for addon, apps in xs:
            app = apps.get(APP)
            if compat_mode == 'strict':
                if app and f_strict(app):
                    addons.append(addon)
            elif compat_mode == 'ignore':
                if app and f_ignore(app):
                    addons.append(addon)
            elif compat_mode == 'normal':
                # This does a db hit but it's cached. This handles the cases
                # for strict opt-in, binary components, and compat overrides.
                v = addon.compatible_version(APP.id, version, platform,
                                             compat_mode)
                if v:  # There's a compatible version.
                    addons.append(addon)

    # Put personas back in.
    addons.extend(personas)

    # We prefer add-ons that support the current locale.
    lang = translation.get_language()

    def partitioner(x):
        return x.description is not None and (x.description.locale == lang)

    groups = dict(partition(addons, partitioner))
    good, others = groups.get(True, []), groups.get(False, [])

    if shuffle:
        random.shuffle(good)
        random.shuffle(others)

    # If limit=0, we return all addons with `good` coming before `others`.
    # Otherwise pad `good` if less than the limit and return the limit.
    if limit > 0:
        if len(good) < limit:
            good.extend(others[:limit - len(good)])
        return good[:limit]
    else:
        good.extend(others)
        return good
class APIView(object):
    """
    Base view class for all API views.

    Subclasses implement process_request(); this base handles version
    validation, format negotiation (xml/json) and shared rendering.
    """

    def __call__(self, request, api_version, *args, **kwargs):
        # Record per-request state, reject out-of-range API versions, then
        # dispatch to the subclass's process_request().
        self.version = float(api_version)
        self.format = request.REQUEST.get('format', 'xml')
        self.mimetype = ('text/xml' if self.format == 'xml'
                         else 'application/json')
        self.request = request
        if not validate_api_version(api_version):
            msg = OUT_OF_DATE.format(self.version, api.CURRENT_VERSION)
            return self.render_msg(msg, ERROR, status=403,
                                   mimetype=self.mimetype)

        return self.process_request(*args, **kwargs)

    def render_msg(self, msg, error_level=None, *args, **kwargs):
        """
        Renders a simple message, in the negotiated format (xml or json).
        Extra args/kwargs are forwarded to the response constructor.
        """
        if self.format == 'xml':
            return render_xml(
                self.request, 'api/message.xml',
                {'error_level': error_level, 'msg': msg}, *args, **kwargs)
        else:
            return HttpResponse(json.dumps({'msg': _(msg)}), *args, **kwargs)

    def render(self, template, context):
        # Shared render path: XML goes through the template, JSON through
        # the subclass-provided render_json().
        context['api_version'] = self.version
        context['api'] = api
        if self.format == 'xml':
            return render_xml(self.request, template, context,
                              mimetype=self.mimetype)
        else:
            return HttpResponse(self.render_json(context),
                                mimetype=self.mimetype)

    def render_json(self, context):
        # Default stub; subclasses override with real JSON serialization.
        return json.dumps({'msg': _('Not implemented yet.')})
class AddonDetailView(APIView):
    """Detail endpoint for a single add-on, addressable by id or slug."""

    @allow_cross_site_request
    def process_request(self, addon_id):
        try:
            addon = Addon.objects.id_or_slug(addon_id).get()
        except Addon.DoesNotExist:
            return self.render_msg(
                'Add-on not found!', ERROR, status=404, mimetype=self.mimetype)
        if addon.is_disabled:
            # Disabled add-ons 404 rather than reveal their state.
            return self.render_msg('Add-on disabled.', ERROR, status=404,
                                   mimetype=self.mimetype)
        return self.render_addon(addon)

    def render_addon(self, addon):
        return self.render('api/addon_detail.xml', {'addon': addon})

    def render_json(self, context):
        # Overrides the APIView stub with a real serialization.
        return json.dumps(addon_to_dict(context['addon']), cls=JSONEncoder)
def guid_search(request, api_version, guids):
    """Look up add-ons by a comma-separated list of GUIDs.

    Renders the matches as XML, caching each rendered per-addon snippet
    keyed by (api_version, locale, guid).  Misses are cached as '' so
    unknown GUIDs do not hit the database repeatedly.
    """
    lang = request.LANG

    def guid_search_cache_key(guid):
        # Key varies by API version and locale since both affect the XML.
        key = 'guid_search:%s:%s:%s' % (api_version, lang, guid)
        return hashlib.md5(smart_str(key)).hexdigest()

    guids = [g.strip() for g in guids.split(',')] if guids else []

    addons_xml = cache.get_many([guid_search_cache_key(g) for g in guids])
    dirty_keys = set()

    for g in guids:
        key = guid_search_cache_key(g)
        if key not in addons_xml:
            dirty_keys.add(key)
            try:
                addon = Addon.objects.get(guid=g, disabled_by_user=False,
                                          status__in=SEARCHABLE_STATUSES)

            except Addon.DoesNotExist:
                # Cache the miss too (empty string).
                addons_xml[key] = ''

            else:
                addon_xml = render_xml_to_string(request,
                                                 'api/includes/addon.xml',
                                                 {'addon': addon,
                                                  'api_version': api_version,
                                                  'api': api})
                addons_xml[key] = addon_xml

    # Only write back the entries we regenerated this request.
    # (iteritems: this module targets Python 2.)
    cache.set_many(dict((k, v) for k, v in addons_xml.iteritems()
                        if k in dirty_keys))

    compat = (CompatOverride.objects.filter(guid__in=guids)
              .transform(CompatOverride.transformer))

    # Drop the cached-miss empty strings before rendering.
    addons_xml = [v for v in addons_xml.values() if v]
    return render_xml(request, 'api/search.xml',
                      {'addons_xml': addons_xml,
                       'total': len(addons_xml),
                       'compat': compat,
                       'api_version': api_version, 'api': api})
class SearchView(APIView):
    """Add-on search endpoint backed by the search (ES) backend."""

    def process_request(self, query, addon_type='ALL', limit=10,
                        platform='ALL', version=None, compat_mode='strict'):
        """
        Query the search backend and serve up the XML.
        """
        limit = min(MAX_LIMIT, int(limit))
        app_id = self.request.APP.id

        # We currently filter for status=PUBLIC for all versions. If
        # that changes, the contract for API version 1.5 requires
        # that we continue filtering for it there.
        filters = {
            'app': app_id,
            'status': amo.STATUS_PUBLIC,
            'is_disabled': False,
            'has_version': True,
        }

        # Opts may get overridden by query string filters.
        opts = {
            'addon_type': addon_type,
            'version': version,
        }
        # Specific case for Personas (bug 990768): if we search providing the
        # Persona addon type (9), don't filter on the platform as Personas
        # don't have compatible platforms to filter on.
        if addon_type != '9':
            opts['platform'] = platform

        if self.version < 1.5:
            # Fix doubly encoded query strings.
            try:
                query = urllib.unquote(query.encode('ascii'))
            except UnicodeEncodeError:
                # This fails if the string is already UTF-8.
                pass

        query, qs_filters, params = extract_filters(query, opts)

        qs = Addon.search().query(or_=name_query(query))
        filters.update(qs_filters)
        if 'type' not in filters:
            # Filter by ALL types, which is really all types except for apps.
            filters['type__in'] = list(amo.ADDON_SEARCH_TYPES)
        qs = qs.filter(**filters)

        qs = qs[:limit]
        total = qs.count()

        results = []
        for addon in qs:
            compat_version = addon.compatible_version(app_id,
                                                      params['version'],
                                                      params['platform'],
                                                      compat_mode)
            # Specific case for Personas (bug 990768): if we search providing
            # the Persona addon type (9), then don't look for a compatible
            # version.
            if compat_version or addon_type == '9':
                addon.compat_version = compat_version
                results.append(addon)
                if len(results) == limit:
                    break
            else:
                # We're excluding this addon because there are no
                # compatible versions. Decrement the total.
                total -= 1

        return self.render('api/search.xml', {
            'results': results,
            'total': total,
            # For caching
            'version': version,
            'compat_mode': compat_mode,
        })
@json_view
def search_suggestions(request):
    """Return add-on/persona autosuggest items as JSON.

    Responds 503 when the autosuggest throttle waffle sample is active.
    Ratings are coerced to float for the JSON payload.
    """
    if waffle.sample_is_active('autosuggest-throttle'):
        return HttpResponse(status=503)
    category = request.GET.get('cat', 'all')
    suggesters = {
        'all': AddonSuggestionsAjax,
        'themes': PersonaSuggestionsAjax,
    }
    suggester_class = suggesters.get(category, AddonSuggestionsAjax)
    items = suggester_class(request, ratings=True).items
    for item in items:
        item['rating'] = float(item['rating'])
    return {'suggestions': items}
class ListView(APIView):
    """Lists of featured / new / popular / hot add-ons."""

    def process_request(self, list_type='recommended', addon_type='ALL',
                        limit=10, platform='ALL', version=None,
                        compat_mode='strict'):
        """
        Find a list of new or featured add-ons. Filtering is done in Python
        for cache-friendliness and to avoid heavy queries.
        """
        limit = min(MAX_LIMIT, int(limit))
        APP, platform = self.request.APP, platform.lower()
        qs = Addon.objects.listed(APP)
        shuffle = True
        if list_type in ('by_adu', 'featured'):
            # Personas have no usage stats / featured handling here.
            qs = qs.exclude(type=amo.ADDON_PERSONA)

        if list_type == 'newest':
            new = date.today() - timedelta(days=NEW_DAYS)
            addons = (qs.filter(created__gte=new)
                      .order_by('-created'))[:limit + BUFFER]
        elif list_type == 'by_adu':
            addons = qs.order_by('-average_daily_users')[:limit + BUFFER]
            shuffle = False  # By_adu is an ordered list.
        elif list_type == 'hotness':
            # Filter to type=1 so we hit visible_idx. Only extensions have a
            # hotness index right now so this is not incorrect.
            addons = (qs.filter(type=amo.ADDON_EXTENSION)
                      .order_by('-hotness'))[:limit + BUFFER]
            shuffle = False
        else:
            ids = Addon.featured_random(APP, self.request.LANG)
            addons = manual_order(qs, ids[:limit + BUFFER], 'addons.id')
            shuffle = False

        args = (addon_type, limit, APP, platform, version, compat_mode,
                shuffle)

        def f():
            return self._process(addons, *args)

        # Cache keyed on the queryset plus every filtering argument.
        return cached_with(addons, f, map(encoding.smart_str, args))

    def _process(self, addons, *args):
        return self.render('api/list.xml',
                           {'addons': addon_filter(addons, *args)})

    def render_json(self, context):
        return json.dumps([addon_to_dict(a) for a in context['addons']],
                          cls=JSONEncoder)
class LanguageView(APIView):
    """List public language packs for the current application."""

    def process_request(self):
        addons = Addon.objects.filter(status=amo.STATUS_PUBLIC,
                                      type=amo.ADDON_LPAPP,
                                      appsupport__app=self.request.APP.id,
                                      disabled_by_user=False).order_by('pk')
        return self.render('api/list.xml', {'addons': addons,
                                            'show_localepicker': True})
# pylint: disable-msg=W0613
def redirect_view(request, url):
    """
    Redirect all requests that come here to an API call with a view parameter.
    """
    dest = '/api/%.1f/%s' % (api.CURRENT_VERSION,
                             urllib.quote(url.encode('utf-8')))
    # Prepend the locale/app URL prefix appropriate for this request.
    dest = get_url_prefix().fix(dest)

    return HttpResponsePermanentRedirect(dest)
def request_token_ready(request, token):
    """Render the OAuth request-token-ready confirmation page."""
    context = {
        'error': request.GET.get('error', ''),
        'token': token,
    }
    return render(request, 'piston/request_token_ready.html', context)
@csrf_exempt
@post_required
def performance_add(request):
    """
    A wrapper around adding in performance data that is easier than
    using the piston API.
    """
    # Trigger OAuth.
    if not AMOOAuthAuthentication(two_legged=True).is_authenticated(request):
        return rc.FORBIDDEN

    form = PerformanceForm(request.POST)
    if not form.is_valid():
        return form.show_error()

    # Create (or fetch) the OS/app version rows the measurement refers to.
    os, created = (PerformanceOSVersion
                   .objects.safer_get_or_create(**form.os_version))
    app, created = (PerformanceAppVersions
                    .objects.safer_get_or_create(**form.app_version))

    data = form.performance
    data.update({'osversion': os, 'appversion': app})
    # Look up on everything except the average time.
    result, created = Performance.objects.safer_get_or_create(**data)
    result.average = form.cleaned_data['average']
    result.save()

    log.info('Performance created for add-on: %s, %s' %
             (form.cleaned_data['addon_id'], form.cleaned_data['average']))
    return rc.ALL_OK
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Context management API of mxnet."""
from __future__ import absolute_import
import threading
import warnings
import ctypes
from .base import classproperty, with_metaclass, _MXClassPropertyMetaClass
from .base import _LIB
from .base import check_call
class Context(with_metaclass(_MXClassPropertyMetaClass, object)):
    """Constructs a context.

    MXNet can run operations on CPU and different GPUs.
    A context describes the device type and ID on which computation should be carried on.

    One can use mx.cpu and mx.gpu for short.

    See also
    ----------
    `How to run MXNet on multiple CPU/GPUs <http://mxnet.io/faq/multi_devices.html>`
    for more details.

    Parameters
    ----------
    device_type : {'cpu', 'gpu'} or Context.
        String representing the device type.

    device_id : int (default=0)
        The device id of the device, needed for GPU.

    Note
    ----
    Context can also be used as a way to change the default context.

    Examples
    --------
    >>> # array on cpu
    >>> cpu_array = mx.nd.ones((2, 3))
    >>> # switch default context to GPU(2)
    >>> with mx.Context(mx.gpu(2)):
    ...     gpu_array = mx.nd.ones((2, 3))
    >>> gpu_array.context
    gpu(2)

    One can also explicitly specify the context when creating an array.

    >>> gpu_array = mx.nd.ones((2, 3), mx.gpu(1))
    >>> gpu_array.context
    gpu(1)
    """
    # static class variable: per-thread default context (set lazily).
    _default_ctx = threading.local()
    # Mappings between MXNet's numeric device-type ids and their names.
    devtype2str = {1: 'cpu', 2: 'gpu', 3: 'cpu_pinned', 5: 'cpu_shared'}
    devstr2type = {'cpu': 1, 'gpu': 2, 'cpu_pinned': 3, 'cpu_shared': 5}

    def __init__(self, device_type, device_id=0):
        # Copy-constructor behaviour: Context(other_ctx) clones other_ctx
        # and ignores device_id.
        if isinstance(device_type, Context):
            self.device_typeid = device_type.device_typeid
            self.device_id = device_type.device_id
        else:
            self.device_typeid = Context.devstr2type[device_type]
            self.device_id = device_id
        # Previous default context, saved by __enter__ for restoration.
        self._old_ctx = None

    @property
    def device_type(self):
        """Returns the device type of current context.

        Examples
        -------
        >>> mx.context.current_context().device_type
        'cpu'
        >>> mx.current_context().device_type
        'cpu'

        Returns
        -------
        device_type : str
        """
        return Context.devtype2str[self.device_typeid]

    def __hash__(self):
        """Compute hash value of context for dictionary lookup"""
        return hash((self.device_typeid, self.device_id))

    def __eq__(self, other):
        """Compares two contexts. Two contexts are equal if they
        have the same device type and device id.
        """
        return isinstance(other, Context) and \
            self.device_typeid == other.device_typeid and \
            self.device_id == other.device_id

    def __str__(self):
        return '%s(%d)' % (self.device_type, self.device_id)

    def __repr__(self):
        return self.__str__()

    def __enter__(self):
        # Lazily initialize this thread's default before swapping it out;
        # threads other than the importing one start without a value.
        if not hasattr(Context._default_ctx, "value"):
            Context._default_ctx.value = Context('cpu', 0)
        self._old_ctx = Context._default_ctx.value
        Context._default_ctx.value = self
        return self

    def __exit__(self, ptype, value, trace):
        # Restore whatever was the default when we entered.
        Context._default_ctx.value = self._old_ctx

    # pylint: disable=no-self-argument
    @classproperty
    def default_ctx(cls):
        # Deprecated accessor kept for backward compatibility.
        warnings.warn("Context.default_ctx has been deprecated. "
                      "Please use Context.current_context() instead. "
                      "Please use test_utils.set_default_context to set a default context",
                      DeprecationWarning)
        if not hasattr(Context._default_ctx, "value"):
            cls._default_ctx.value = Context('cpu', 0)
        return cls._default_ctx.value

    @default_ctx.setter
    def default_ctx(cls, val):
        # Deprecated setter kept for backward compatibility.
        warnings.warn("Context.default_ctx has been deprecated. "
                      "Please use Context.current_context() instead. "
                      "Please use test_utils.set_default_context to set a default context",
                      DeprecationWarning)
        cls._default_ctx.value = val
    # pylint: enable=no-self-argument

    def empty_cache(self):
        """Empties the memory cache for the current contexts device.

        MXNet utilizes a memory pool to avoid excessive allocations.
        Calling empty_cache will empty the memory pool of the contexts
        device. This will only free the memory of the unreferenced data.

        Examples
        -------
        >>> ctx = mx.gpu(0)
        >>> arr = mx.nd.ones((200,200), ctx=ctx)
        >>> del arr
        >>> ctx.empty_cache() # forces release of memory allocated for arr
        """
        dev_type = ctypes.c_int(self.device_typeid)
        dev_id = ctypes.c_int(self.device_id)
        check_call(_LIB.MXStorageEmptyCache(dev_type, dev_id))
# initialize the default context in Context for the importing thread;
# other threads get theirs lazily (see Context.__enter__/current_context).
Context._default_ctx.value = Context('cpu', 0)
def cpu(device_id=0):
    """Returns a CPU context.

    This function is a short cut for ``Context('cpu', device_id)``.
    For most operations, when no context is specified, the default context is `cpu()`.

    Examples
    ----------
    >>> with mx.cpu():
    ...     cpu_array = mx.nd.ones((2, 3))
    >>> cpu_array.context
    cpu(0)
    >>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu())
    >>> cpu_array.context
    cpu(0)

    Parameters
    ----------
    device_id : int, optional
        The device id of the device. `device_id` is not needed for CPU.
        This is included to make interface compatible with GPU.

    Returns
    -------
    context : Context
        The corresponding CPU context.
    """
    return Context('cpu', device_id)
def cpu_pinned(device_id=0):
    """Returns a CPU pinned memory context. Copying from CPU pinned memory to GPU
    is faster than from normal CPU memory.

    This function is a short cut for ``Context('cpu_pinned', device_id)``.

    Examples
    ----------
    >>> with mx.cpu_pinned():
    ...     cpu_array = mx.nd.ones((2, 3))
    >>> cpu_array.context
    cpu_pinned(0)
    >>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu_pinned())
    >>> cpu_array.context
    cpu_pinned(0)

    Parameters
    ----------
    device_id : int, optional
        The device id of the device. `device_id` is not needed for CPU.
        This is included to make interface compatible with GPU.

    Returns
    -------
    context : Context
        The corresponding CPU pinned memory context.
    """
    return Context('cpu_pinned', device_id)
def gpu(device_id=0):
    """Returns a GPU context.

    This function is a short cut for Context('gpu', device_id).
    The K GPUs on a node are typically numbered as 0,...,K-1.

    Examples
    ----------
    >>> cpu_array = mx.nd.ones((2, 3))
    >>> cpu_array.context
    cpu(0)
    >>> with mx.gpu(1):
    ...     gpu_array = mx.nd.ones((2, 3))
    >>> gpu_array.context
    gpu(1)
    >>> gpu_array = mx.nd.ones((2, 3), ctx=mx.gpu(1))
    >>> gpu_array.context
    gpu(1)

    Parameters
    ----------
    device_id : int, optional
        The device id of the device, needed for GPU.

    Returns
    -------
    context : Context
        The corresponding GPU context.
    """
    return Context('gpu', device_id)
def num_gpus():
    """Query CUDA for the number of GPUs present.

    Raises
    ------
    Will raise an exception on any CUDA error.

    Returns
    -------
    count : int
        The number of GPUs.

    """
    count = ctypes.c_int()
    check_call(_LIB.MXGetGPUCount(ctypes.byref(count)))
    return count.value
def gpu_memory_info(device_id=0):
    """Query CUDA for the free and total bytes of GPU global memory.

    Parameters
    ----------
    device_id : int, optional
        The device id of the GPU device.

    Raises
    ------
    Will raise an exception on any CUDA error.

    Returns
    -------
    (free, total) : (int, int)
        The free and total amount of GPU global memory, in bytes.

    """
    free = ctypes.c_uint64()
    total = ctypes.c_uint64()
    dev_id = ctypes.c_int(device_id)
    check_call(_LIB.MXGetGPUMemoryInformation64(dev_id, ctypes.byref(free), ctypes.byref(total)))
    return (free.value, total.value)
def current_context():
    """Returns the current context.

    By default, `mx.cpu()` is used for all the computations
    and it can be overridden by using `with mx.Context(x)` statement where
    x can be cpu(device_id) or gpu(device_id).

    Examples
    -------
    >>> mx.current_context()
    cpu(0)
    >>> with mx.Context('gpu', 1):  # Context changed in `with` block.
    ...    mx.current_context()  # Computation done here will be on gpu(1).
    ...
    gpu(1)
    >>> mx.current_context() # Back to default context.
    cpu(0)

    Returns
    -------
    default_ctx : Context
    """
    # The thread-local may be unset on threads other than the one that
    # imported this module; initialize it lazily.
    if not hasattr(Context._default_ctx, "value"):
        Context._default_ctx.value = Context('cpu', 0)
    return Context._default_ctx.value
| |
#-*- coding: utf-8 -*-
#
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <niemeyer@conectiva.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.interfaces.qt4.packageview import QtPackageView
from smart.interfaces.qt4 import getPixmap, centerWindow
from smart.util.strtools import sizeToStr
from smart.report import Report
from smart import *
#import PyQt4.QtGui as QtGui
#import PyQt4.QtCore as QtCore
from PyQt4 import QtGui as QtGui
from PyQt4 import QtCore as QtCore
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class QtChanges(QtGui.QDialog):
    """Modal dialog summarizing the packages a changeset will affect.

    Shows grouped install/upgrade/downgrade/remove/keep package lists,
    the download/disk-space impact, and either OK/Cancel (confirm mode)
    or a single Close button.
    """

    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setWindowIcon(QtGui.QIcon(getPixmap("smart")))
        self.setWindowTitle(_("Change Summary"))
        self.setModal(True)
        self.setMinimumSize(600, 400)
        centerWindow(self)

        self._vbox = QtGui.QVBoxLayout(self)
        self._vbox.setMargin(10)
        self._vbox.setSpacing(10)

        # Optional caption above the package tree (see showChangeSet).
        self._label = QtGui.QLabel(self)
        self._vbox.addWidget(self._label)

        self._pv = QtPackageView(self)
        self._pv.getTreeView().header().hide()
        self._pv.setExpandPackage(True)
        self._pv.show()
        self._vbox.addWidget(self._pv)

        # Label describing download size / disk usage impact.
        self._sizelabel = QtGui.QLabel("", self)
        self._vbox.addWidget(self._sizelabel)

        # Button box used when confirmation is requested (OK/Cancel).
        self._confirmbbox = QtGui.QWidget(self)
        layout = QtGui.QHBoxLayout(self._confirmbbox)
        layout.setSpacing(10)
        layout.addStretch(1)
        self._vbox.addWidget(self._confirmbbox)

        self._cancelbutton = QtGui.QPushButton(_("Cancel"), self._confirmbbox)
        QtCore.QObject.connect(self._cancelbutton, QtCore.SIGNAL("clicked()"),
                               self, QtCore.SLOT("reject()"))
        self._okbutton = QtGui.QPushButton(_("OK"), self._confirmbbox)
        QtCore.QObject.connect(self._okbutton, QtCore.SIGNAL("clicked()"),
                               self, QtCore.SLOT("accept()"))

        # Button box used for purely informational display (Close only).
        self._closebbox = QtGui.QWidget(self)
        layout = QtGui.QHBoxLayout(self._closebbox)
        layout.setSpacing(10)
        layout.addStretch(1)
        self._vbox.addWidget(self._closebbox)

        self._closebutton = QtGui.QPushButton(_("Close"), self._closebbox)
        QtCore.QObject.connect(self._closebutton, QtCore.SIGNAL("clicked()"),
                               self, QtCore.SLOT("close()"))

    def showChangeSet(self, changeset, keep=None, confirm=False, label=None):
        """Display the dialog for `changeset` and run it modally.

        :param changeset: the changeset to summarize (fed to Report).
        :param keep: optional mapping of packages being kept, shown as an
            extra group.
        :param confirm: when True show OK/Cancel and return whether the
            user accepted; otherwise only a Close button is shown.
        :param label: optional caption shown above the package tree.
        :return: True if the dialog was accepted, False otherwise.
        """
        report = Report(changeset)
        report.compute()

        class Sorter(unicode):
            # Sorts the top-level group labels into a fixed display order.
            ORDER = [_("Remove"), _("Downgrade"), _("Reinstall"),
                     _("Install"), _("Upgrade")]

            def _index(self, s):
                i = 0
                for os in self.ORDER:
                    if os.startswith(s):
                        return i
                    i += 1
                return i

            def __cmp__(self, other):
                return cmp(self._index(unicode(self)), self._index(unicode(other)))

            def __lt__(self, other):
                return cmp(self, other) < 0

        packages = {}
        if report.install:
            install = {}
            reinstall = {}
            upgrade = {}
            downgrade = {}
            lst = report.install.keys()
            lst.sort()
            for pkg in lst:
                package = {}
                done = {}
                if pkg in report.upgrading:
                    for upgpkg in report.upgrading[pkg]:
                        package.setdefault(_("Upgrades"), []).append(upgpkg)
                        done[upgpkg] = True
                if pkg in report.downgrading:
                    for dwnpkg in report.downgrading[pkg]:
                        package.setdefault(_("Downgrades"), []).append(dwnpkg)
                        done[dwnpkg] = True
                if pkg in report.requires:
                    for reqpkg in report.requires[pkg]:
                        package.setdefault(_("Requires"), []).append(reqpkg)
                if pkg in report.requiredby:
                    for reqpkg in report.requiredby[pkg]:
                        package.setdefault(_("Required By"), []).append(reqpkg)
                if pkg in report.conflicts:
                    for cnfpkg in report.conflicts[pkg]:
                        # Don't list packages already shown as up/downgrades.
                        if cnfpkg in done:
                            continue
                        package.setdefault(_("Conflicts"), []).append(cnfpkg)
                if pkg.installed:
                    reinstall[pkg] = package
                elif pkg in report.upgrading:
                    upgrade[pkg] = package
                elif pkg in report.downgrading:
                    downgrade[pkg] = package
                else:
                    install[pkg] = package
            if reinstall:
                packages[Sorter(_("Reinstall (%d)") % len(reinstall))] = reinstall
            if install:
                packages[Sorter(_("Install (%d)") % len(install))] = install
            if upgrade:
                packages[Sorter(_("Upgrade (%d)") % len(upgrade))] = upgrade
            if downgrade:
                packages[Sorter(_("Downgrade (%d)") % len(downgrade))] = downgrade

        if report.removed:
            remove = {}
            lst = report.removed.keys()
            lst.sort()
            for pkg in lst:
                package = {}
                done = {}
                if pkg in report.requires:
                    for reqpkg in report.requires[pkg]:
                        package.setdefault(_("Requires"), []).append(reqpkg)
                if pkg in report.requiredby:
                    for reqpkg in report.requiredby[pkg]:
                        package.setdefault(_("Required By"), []).append(reqpkg)
                if pkg in report.conflicts:
                    for cnfpkg in report.conflicts[pkg]:
                        if cnfpkg in done:
                            continue
                        package.setdefault(_("Conflicts"), []).append(cnfpkg)
                remove[pkg] = package
            if remove:
                packages[Sorter(_("Remove (%d)") % len(report.removed))] = remove

        if keep:
            packages[Sorter(_("Keep (%d)") % len(keep))] = keep

        dsize = report.getDownloadSize()
        size = report.getInstallSize() - report.getRemoveSize()
        sizestr = ""
        if dsize:
            sizestr += _("%s of package files are needed. ") % sizeToStr(dsize)
        if size > 0:
            sizestr += _("%s will be used.") % sizeToStr(size)
        elif size < 0:
            size *= -1
            sizestr += _("%s will be freed.") % sizeToStr(size)
        if dsize or size:
            self._sizelabel.setText(sizestr)
            self._sizelabel.show()
        else:
            self._sizelabel.hide()

        if confirm:
            self._confirmbbox.show()
            self._closebbox.hide()
            self._okbutton.setDefault(True)
        else:
            self._closebbox.show()
            self._confirmbbox.hide()
            self._closebutton.setDefault(True)

        if label:
            # BUG FIX: QLabel has no set_text() method (that is the GTK
            # API); the Qt method is setText().  The old call raised
            # AttributeError whenever a label was supplied.
            self._label.setText(label)
            self._label.show()
        else:
            self._label.hide()

        self._pv.setPackages(packages, changeset)
        # Expand first level
        self._pv.setExpanded([(x,) for x in packages])

        self._result = False
        self.show()

        dialogResult = self.exec_()
        self._result = (dialogResult == QtGui.QDialog.Accepted)

        return self._result
# vim:ts=4:sw=4:et
| |
"""Translation/Localization functions.
Provides :mod:`gettext` translation functions via an app's
``pylons.translator`` and get/set_lang for changing the language
translated to.
"""
import os
from gettext import NullTranslations, translation
import pylons
__all__ = ['_', 'add_fallback', 'get_lang', 'gettext', 'gettext_noop',
'lazy_gettext', 'lazy_ngettext', 'lazy_ugettext', 'lazy_ungettext',
'ngettext', 'set_lang', 'ugettext', 'ungettext', 'LanguageError',
'N_']
class LanguageError(Exception):
    """Exception raised when a problem occurs with changing languages"""
    # Raised by _get_translator / set_lang when a catalog cannot be loaded.
    pass
class LazyString(object):
    """A string-like object whose value is computed on demand.

    Wraps a callable together with its arguments; every string operation
    re-invokes the callable, so the value always reflects the current
    state (e.g. the currently selected language).  Override eval() to
    change how the value is produced.

    This technique was copied from TurboGears.
    """

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def eval(self):
        """Compute and return the current value."""
        return self.func(*self.args, **self.kwargs)

    def __str__(self):
        return str(self.eval())

    def __unicode__(self):
        # Python 2 only; harmless (never called) on Python 3.
        return unicode(self.eval())

    def __mod__(self, other):
        return self.eval() % other

    def format(self, *args):
        return self.eval().format(*args)
def lazify(func):
    """Decorator to return a lazy-evaluated version of the original."""
    def wrapper(*args, **kwargs):
        return LazyString(func, *args, **kwargs)

    # Mirror the wrapped function's identity so introspection stays useful.
    wrapper.__name__ = 'lazy_%s' % func.__name__
    wrapper.__doc__ = ('Lazy-evaluated version of the %s function\n\n%s'
                       % (func.__name__, func.__doc__))
    return wrapper
def gettext_noop(value):
    """Mark a string for translation without translating it. Returns
    value.

    Used for global strings that must be stored untranslated and
    translated later, at lookup time, e.g.::

        foo = N_('Hello')

        class Bar:
            def __init__(self):
                self.local_foo = _(foo)

        h.set_lang('fr')
        assert Bar().local_foo == 'Bonjour'
        h.set_lang('es')
        assert Bar().local_foo == 'Hola'
        assert foo == 'Hello'

    """
    return value


N_ = gettext_noop
def gettext(value):
    """Mark a string for translation. Returns the localized string of
    value.

    Mark a string to be localized as follows::

        gettext('This should be in lots of languages')

    """
    # Delegates to the request-local translator registered by Pylons.
    return pylons.translator.gettext(value)


lazy_gettext = lazify(gettext)
def ugettext(value):
    """Mark a string for translation. Returns the localized unicode
    string of value.

    Mark a string to be localized as follows::

        _('This should be in lots of languages')

    """
    return pylons.translator.ugettext(value)


# `_` is the conventional shorthand alias for ugettext.
_ = ugettext
lazy_ugettext = lazify(ugettext)
def ngettext(singular, plural, n):
    """Translate a pluralized message and return the localized string.

    ``singular`` is the catalog lookup key; ``n`` selects which plural
    form to return. Usage::

        ngettext('There is %(num)d file here', 'There are %(num)d files here',
                 n) % {'num': n}
    """
    translator = pylons.translator
    return translator.ngettext(singular, plural, n)
lazy_ngettext = lazify(ngettext)
def ungettext(singular, plural, n):
    """Translate a pluralized message and return the localized unicode
    string.

    ``singular`` is the catalog lookup key; ``n`` selects which plural
    form to return. Usage::

        ungettext('There is %(num)d file here', 'There are %(num)d files here',
                  n) % {'num': n}
    """
    translator = pylons.translator
    return translator.ungettext(singular, plural, n)
lazy_ungettext = lazify(ungettext)
def _get_translator(lang, **kwargs):
"""Utility method to get a valid translator object from a language
name"""
if not lang:
return NullTranslations()
if 'pylons_config' in kwargs:
conf = kwargs.pop('pylons_config')
else:
conf = pylons.config.current_conf()
localedir = os.path.join(conf['pylons.paths']['root'], 'i18n')
if not isinstance(lang, list):
lang = [lang]
try:
translator = translation(conf['pylons.package'], localedir,
languages=lang, **kwargs)
except IOError, ioe:
raise LanguageError('IOError: %s' % ioe)
translator.pylons_lang = lang
return translator
def set_lang(lang, set_environ=True, **kwargs):
    """Switch the language used for translations.

    ``lang`` may be a single language name or a list of names; with a
    list, the first entry is the main language and the rest become
    fallbacks. When ``set_environ`` is false the translator is returned
    instead of being installed into the request environment.
    """
    translator = _get_translator(lang, **kwargs)
    if not set_environ:
        return translator
    request_environ = pylons.request.environ
    request_environ['pylons.pylons'].translator = translator
    if 'paste.registry' in request_environ:
        request_environ['paste.registry'].replace(pylons.translator, translator)
def get_lang():
    """Return the language list of the current translator, or None when
    no language has been set."""
    translator = pylons.translator
    return getattr(translator, 'pylons_lang', None)
def add_fallback(lang, **kwargs):
    """Register *lang* as a fallback for words the current language does
    not translate.

    The fallback is tied to the currently selected language; calling
    set_lang() resets the fallback chain. May be called repeatedly to
    stack multiple fallbacks.
    """
    fallback = _get_translator(lang, **kwargs)
    return pylons.translator.add_fallback(fallback)
| |
"""
Support for DLNA DMR (Device Media Renderer).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.dlna_dmr/
"""
import asyncio
from datetime import datetime
from datetime import timedelta
import functools
import logging
from typing import Optional
import aiohttp
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL, MEDIA_TYPE_EPISODE, MEDIA_TYPE_IMAGE,
MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK, SUPPORT_STOP,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.const import (
CONF_NAME, CONF_URL, EVENT_HOMEASSISTANT_STOP, STATE_IDLE, STATE_OFF,
STATE_ON, STATE_PAUSED, STATE_PLAYING)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import HomeAssistantType
import homeassistant.helpers.config_validation as cv
from homeassistant.util import get_local_ip
REQUIREMENTS = ['async-upnp-client==0.14.7']
_LOGGER = logging.getLogger(__name__)
# Key under which this platform keeps shared state (event handler, server,
# setup lock) in hass.data.
DLNA_DMR_DATA = 'dlna_dmr'
DEFAULT_NAME = 'DLNA Digital Media Renderer'
# Port the aiohttp notify server listens on for UPnP eventing callbacks.
DEFAULT_LISTEN_PORT = 8301
CONF_LISTEN_IP = 'listen_ip'
CONF_LISTEN_PORT = 'listen_port'
CONF_CALLBACK_URL_OVERRIDE = 'callback_url_override'
# Platform configuration: the device description URL is required; listen
# address/port and the callback URL override are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_URL): cv.string,
    vol.Optional(CONF_LISTEN_IP): cv.string,
    vol.Optional(CONF_LISTEN_PORT, default=DEFAULT_LISTEN_PORT): cv.port,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_CALLBACK_URL_OVERRIDE): cv.url,
})
# Map Home Assistant media types to UPnP item classes (used when queueing
# media in async_play_media).
HOME_ASSISTANT_UPNP_CLASS_MAPPING = {
    MEDIA_TYPE_MUSIC: 'object.item.audioItem',
    MEDIA_TYPE_TVSHOW: 'object.item.videoItem',
    MEDIA_TYPE_MOVIE: 'object.item.videoItem',
    MEDIA_TYPE_VIDEO: 'object.item.videoItem',
    MEDIA_TYPE_EPISODE: 'object.item.videoItem',
    MEDIA_TYPE_CHANNEL: 'object.item.videoItem',
    MEDIA_TYPE_IMAGE: 'object.item.imageItem',
    MEDIA_TYPE_PLAYLIST: 'object.item.playlist',
}
# Fallback UPnP class for media types missing from the mapping above.
UPNP_CLASS_DEFAULT = 'object.item'
# Map Home Assistant media types to wildcard MIME types.
HOME_ASSISTANT_UPNP_MIME_TYPE_MAPPING = {
    MEDIA_TYPE_MUSIC: 'audio/*',
    MEDIA_TYPE_TVSHOW: 'video/*',
    MEDIA_TYPE_MOVIE: 'video/*',
    MEDIA_TYPE_VIDEO: 'video/*',
    MEDIA_TYPE_EPISODE: 'video/*',
    MEDIA_TYPE_CHANNEL: 'video/*',
    MEDIA_TYPE_IMAGE: 'image/*',
    MEDIA_TYPE_PLAYLIST: 'playlist/*',
}
def catch_request_errors():
    """Decorator factory for coroutine methods: log and swallow
    asyncio.TimeoutError / aiohttp.ClientError instead of propagating.

    FIX: the wrapper must itself be a coroutine that awaits the wrapped
    call. The previous synchronous wrapper returned the coroutine object
    without awaiting it, so exceptions raised inside the coroutine were
    never caught here.
    """
    def call_wrapper(func):
        """Call wrapper for decorator."""
        @functools.wraps(func)
        async def wrapper(self, *args, **kwargs):
            """Await the wrapped call, catching request errors."""
            try:
                return await func(self, *args, **kwargs)
            except (asyncio.TimeoutError, aiohttp.ClientError):
                _LOGGER.error("Error during call %s", func.__name__)
        return wrapper
    return call_wrapper
async def async_start_event_handler(
        hass: HomeAssistantType,
        server_host: str,
        server_port: int,
        requester,
        callback_url_override: Optional[str] = None):
    """Start the shared UPnP notify server (once) and return its event
    handler; subsequent calls reuse the already-running server."""
    hass_data = hass.data[DLNA_DMR_DATA]
    if 'event_handler' in hass_data:
        # Already running; all entities share a single server.
        return hass_data['event_handler']

    # Start the aiohttp-based notify server.
    from async_upnp_client.aiohttp import AiohttpNotifyServer
    notify_server = AiohttpNotifyServer(
        requester,
        listen_port=server_port,
        listen_host=server_host,
        loop=hass.loop,
        callback_url=callback_url_override)
    await notify_server.start_server()
    _LOGGER.info(
        'UPNP/DLNA event handler listening, url: %s',
        notify_server.callback_url)
    hass_data['notify_server'] = notify_server
    hass_data['event_handler'] = notify_server.event_handler

    # Shut the server down gracefully when Home Assistant stops.
    async def async_stop_server(event):
        """Stop server."""
        _LOGGER.debug('Stopping UPNP/DLNA event handler')
        await notify_server.stop_server()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_server)

    return notify_server.event_handler
async def async_setup_platform(
        hass: HomeAssistantType,
        config,
        async_add_entities,
        discovery_info=None):
    """Set up DLNA DMR platform.

    The device URL comes either from static configuration (CONF_URL is
    required by PLATFORM_SCHEMA) or from SSDP discovery info.
    Raises PlatformNotReady when the device cannot be reached.
    """
    if config.get(CONF_URL) is not None:
        url = config[CONF_URL]
        name = config.get(CONF_NAME)
    elif discovery_info is not None:
        url = discovery_info['ssdp_description']
        name = discovery_info.get('name')

    if DLNA_DMR_DATA not in hass.data:
        hass.data[DLNA_DMR_DATA] = {}

    if 'lock' not in hass.data[DLNA_DMR_DATA]:
        hass.data[DLNA_DMR_DATA]['lock'] = asyncio.Lock()

    # build upnp/aiohttp requester
    from async_upnp_client.aiohttp import AiohttpSessionRequester
    session = async_get_clientsession(hass)
    requester = AiohttpSessionRequester(session, True)

    # Ensure the shared event handler is started exactly once.
    # FIX: `async with` replaces the deprecated `with await lock:` form,
    # which no longer works with asyncio locks on Python 3.9+.
    async with hass.data[DLNA_DMR_DATA]['lock']:
        server_host = config.get(CONF_LISTEN_IP)
        if server_host is None:
            server_host = get_local_ip()
        server_port = config.get(CONF_LISTEN_PORT, DEFAULT_LISTEN_PORT)
        callback_url_override = config.get(CONF_CALLBACK_URL_OVERRIDE)
        event_handler = await async_start_event_handler(
            hass, server_host, server_port, requester, callback_url_override)

    # create upnp device
    from async_upnp_client import UpnpFactory
    factory = UpnpFactory(requester, disable_state_variable_validation=True)
    try:
        upnp_device = await factory.async_create_device(url)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        raise PlatformNotReady()

    # wrap with DmrDevice
    from async_upnp_client.profiles.dlna import DmrDevice
    dlna_device = DmrDevice(upnp_device, event_handler)

    # create our own device
    device = DlnaDmrDevice(dlna_device, name)
    _LOGGER.debug("Adding device: %s", device)
    async_add_entities([device], True)
class DlnaDmrDevice(MediaPlayerDevice):
    """Representation of a DLNA DMR device."""

    def __init__(self, dmr_device, name=None):
        """Initializer."""
        self._device = dmr_device
        self._name = name
        self._available = False
        # When set, the moment at which the UPnP event subscription
        # should be renewed (see async_update).
        self._subscription_renew_time = None

    async def async_added_to_hass(self):
        """Handle addition."""
        self._device.on_event = self._on_event

        # Register unsubscribe on stop
        bus = self.hass.bus
        bus.async_listen_once(
            EVENT_HOMEASSISTANT_STOP, self._async_on_hass_stop)

    @property
    def available(self):
        """Device is available."""
        return self._available

    async def _async_on_hass_stop(self, event):
        """Event handler on HASS stop."""
        # FIX: `async with` replaces the deprecated `with await lock:`
        # form, which no longer works with asyncio locks on Python 3.9+.
        async with self.hass.data[DLNA_DMR_DATA]['lock']:
            await self._device.async_unsubscribe_services()

    async def async_update(self):
        """Retrieve the latest data."""
        was_available = self._available

        try:
            await self._device.async_update()
            self._available = True
        except (asyncio.TimeoutError, aiohttp.ClientError):
            self._available = False
            _LOGGER.debug("Device unavailable")
            return

        # do we need to (re-)subscribe?
        now = datetime.now()
        should_renew = self._subscription_renew_time and \
            now >= self._subscription_renew_time
        if should_renew or \
           not was_available and self._available:
            try:
                timeout = await self._device.async_subscribe_services()
                # Renew at half the granted timeout to stay subscribed.
                self._subscription_renew_time = datetime.now() + timeout / 2
            except (asyncio.TimeoutError, aiohttp.ClientError):
                self._available = False
                _LOGGER.debug("Could not (re)subscribe")

    def _on_event(self, service, state_variables):
        """State variable(s) changed, let home-assistant know."""
        self.schedule_update_ha_state()

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        supported_features = 0

        if self._device.has_volume_level:
            supported_features |= SUPPORT_VOLUME_SET
        if self._device.has_volume_mute:
            supported_features |= SUPPORT_VOLUME_MUTE
        if self._device.has_play:
            supported_features |= SUPPORT_PLAY
        if self._device.has_pause:
            supported_features |= SUPPORT_PAUSE
        if self._device.has_stop:
            supported_features |= SUPPORT_STOP
        if self._device.has_previous:
            supported_features |= SUPPORT_PREVIOUS_TRACK
        if self._device.has_next:
            supported_features |= SUPPORT_NEXT_TRACK
        if self._device.has_play_media:
            supported_features |= SUPPORT_PLAY_MEDIA
        if self._device.has_seek_rel_time:
            supported_features |= SUPPORT_SEEK

        return supported_features

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._device.volume_level

    @catch_request_errors()
    async def async_set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        await self._device.async_set_volume_level(volume)

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._device.is_volume_muted

    @catch_request_errors()
    async def async_mute_volume(self, mute):
        """Mute the volume."""
        desired_mute = bool(mute)
        await self._device.async_mute_volume(desired_mute)

    @catch_request_errors()
    async def async_media_pause(self):
        """Send pause command."""
        if not self._device.can_pause:
            _LOGGER.debug('Cannot do Pause')
            return

        await self._device.async_pause()

    @catch_request_errors()
    async def async_media_play(self):
        """Send play command."""
        if not self._device.can_play:
            _LOGGER.debug('Cannot do Play')
            return

        await self._device.async_play()

    @catch_request_errors()
    async def async_media_stop(self):
        """Send stop command."""
        if not self._device.can_stop:
            _LOGGER.debug('Cannot do Stop')
            return

        await self._device.async_stop()

    @catch_request_errors()
    async def async_media_seek(self, position):
        """Send seek command."""
        if not self._device.can_seek_rel_time:
            _LOGGER.debug('Cannot do Seek/rel_time')
            return

        time = timedelta(seconds=position)
        await self._device.async_seek_rel_time(time)

    @catch_request_errors()
    async def async_play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media."""
        title = "Home Assistant"
        mime_type = HOME_ASSISTANT_UPNP_MIME_TYPE_MAPPING.get(media_type,
                                                              media_type)
        upnp_class = HOME_ASSISTANT_UPNP_CLASS_MAPPING.get(media_type,
                                                           UPNP_CLASS_DEFAULT)

        # Stop current playing media
        if self._device.can_stop:
            await self.async_media_stop()

        # Queue media
        await self._device.async_set_transport_uri(
            media_id, title, mime_type, upnp_class)
        await self._device.async_wait_for_can_play()

        # If already playing, no need to call Play
        from async_upnp_client.profiles.dlna import DeviceState
        if self._device.state == DeviceState.PLAYING:
            return

        # Play it
        await self.async_media_play()

    @catch_request_errors()
    async def async_media_previous_track(self):
        """Send previous track command."""
        if not self._device.can_previous:
            _LOGGER.debug('Cannot do Previous')
            return

        await self._device.async_previous()

    @catch_request_errors()
    async def async_media_next_track(self):
        """Send next track command."""
        if not self._device.can_next:
            _LOGGER.debug('Cannot do Next')
            return

        await self._device.async_next()

    @property
    def media_title(self):
        """Title of current playing media."""
        return self._device.media_title

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._device.media_image_url

    @property
    def state(self):
        """State of the player."""
        if not self._available:
            return STATE_OFF

        from async_upnp_client.profiles.dlna import DeviceState
        if self._device.state is None:
            return STATE_ON
        if self._device.state == DeviceState.PLAYING:
            return STATE_PLAYING
        if self._device.state == DeviceState.PAUSED:
            return STATE_PAUSED

        return STATE_IDLE

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return self._device.media_duration

    @property
    def media_position(self):
        """Position of current playing media in seconds."""
        return self._device.media_position

    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid.

        Returns value from homeassistant.util.dt.utcnow().
        """
        return self._device.media_position_updated_at

    @property
    def name(self) -> str:
        """Return the name of the device."""
        if self._name:
            return self._name
        return self._device.name

    @property
    def unique_id(self) -> str:
        """Return an unique ID."""
        return self._device.udn
| |
import os.path
import urwid
import netlib.utils
from . import pathedit, signals, common
from .. import utils
class ActionBar(urwid.WidgetWrap):
    """One-line footer widget: shows transient status messages and hosts
    the three prompt styles (free text, file path, one-key).

    While a prompt is active, ``self.prompting`` holds the
    ``(callback, args)`` pair to invoke with the user's answer;
    otherwise it is False.
    """
    def __init__(self):
        urwid.WidgetWrap.__init__(self, None)
        self.clear()
        # Subscribe to the app-wide signals that drive this widget.
        signals.status_message.connect(self.sig_message)
        signals.status_prompt.connect(self.sig_prompt)
        signals.status_prompt_path.connect(self.sig_path_prompt)
        signals.status_prompt_onekey.connect(self.sig_prompt_onekey)
        # Remembered so the next path prompt starts in the same directory.
        self.last_path = ""
        self.prompting = False
        # During a one-key prompt: the set of accepted key characters.
        self.onekey = False
        self.pathprompt = False
    def sig_message(self, sender, message, expire=None):
        """Display *message*; when *expire* is set, clear it after that
        many seconds unless another message has replaced it meanwhile."""
        w = urwid.Text(message)
        self._w = w
        if expire:
            def cb(*args):
                # Only clear if our message is still the one on screen.
                if w == self._w:
                    self.clear()
            signals.call_in.send(seconds=expire, callback=cb)
    def prep_prompt(self, p):
        """Normalize a prompt string to end with ': '."""
        return p.strip() + ": "
    def sig_prompt(self, sender, prompt, text, callback, args=()):
        """Open a free-text prompt pre-filled with *text*."""
        signals.focus.send(self, section="footer")
        self._w = urwid.Edit(self.prep_prompt(prompt), text or "")
        self.prompting = (callback, args)
    def sig_path_prompt(self, sender, prompt, callback, args=()):
        """Open a path prompt starting from the directory of the last
        path that was entered."""
        signals.focus.send(self, section="footer")
        self._w = pathedit.PathEdit(
            self.prep_prompt(prompt),
            os.path.dirname(self.last_path)
        )
        self.pathprompt = True
        self.prompting = (callback, args)
    def sig_prompt_onekey(self, sender, prompt, keys, callback, args=()):
        """
        Keys are a set of (word, key) tuples. The appropriate key in the
        word is highlighted.
        """
        signals.focus.send(self, section="footer")
        prompt = [prompt, " ("]
        mkup = []
        # Build "word1,word2,..." markup with each shortcut key highlighted.
        for i, e in enumerate(keys):
            mkup.extend(common.highlight_key(e[0], e[1]))
            if i < len(keys) - 1:
                mkup.append(",")
        prompt.extend(mkup)
        prompt.append(")? ")
        self.onekey = set(i[1] for i in keys)
        self._w = urwid.Edit(prompt, "")
        self.prompting = (callback, args)
    def selectable(self):
        # Must be selectable so the footer can receive keyboard focus.
        return True
    def keypress(self, size, k):
        """Route keys to the active prompt; per urwid convention the key
        is returned when it is not consumed."""
        if self.prompting:
            if k == "esc":
                self.prompt_done()
            elif self.onekey:
                if k == "enter":
                    self.prompt_done()
                elif k in self.onekey:
                    self.prompt_execute(k)
            elif k == "enter":
                self.prompt_execute(self._w.get_edit_text())
            else:
                if common.is_keypress(k):
                    self._w.keypress(size, k)
                else:
                    return k
    def clear(self):
        """Replace the current contents with an empty text widget."""
        self._w = urwid.Text("")
    def prompt_done(self):
        """Reset all prompt state and hand focus back to the body."""
        self.prompting = False
        self.onekey = False
        self.pathprompt = False
        signals.status_message.send(message="")
        signals.focus.send(self, section="body")
    def prompt_execute(self, txt):
        """Finish the prompt and invoke its callback with *txt*; a truthy
        return value is shown as a short status message."""
        if self.pathprompt:
            self.last_path = txt
        p, args = self.prompting
        self.prompt_done()
        msg = p(txt, *args)
        if msg:
            signals.status_message.send(message=msg, expire=1)
class StatusBar(urwid.WidgetWrap):
    """Two-row status area: an info row summarising the master's state
    plus an ActionBar for messages and prompts."""
    def __init__(self, master, helptext):
        self.master, self.helptext = master, helptext
        self.ab = ActionBar()
        # Info row; its inner widget is rebuilt on every redraw().
        self.ib = urwid.WidgetWrap(urwid.Text(""))
        self._w = urwid.Pile([self.ib, self.ab])
        signals.update_settings.connect(self.sig_update_settings)
        signals.flowlist_change.connect(self.sig_update_settings)
        self.redraw()
    def sig_update_settings(self, sender):
        """Signal handler: re-render when settings or flows change."""
        self.redraw()
    def keypress(self, *args, **kwargs):
        # All key handling is delegated to the action bar.
        return self.ab.keypress(*args, **kwargs)
    def get_status(self):
        """Build the urwid markup fragments describing the master's
        active options, filters, intercepts and playback state."""
        r = []
        if self.master.setheaders.count():
            r.append("[")
            r.append(("heading_key", "H"))
            r.append("eaders]")
        if self.master.replacehooks.count():
            r.append("[")
            r.append(("heading_key", "R"))
            r.append("eplacing]")
        if self.master.client_playback:
            r.append("[")
            r.append(("heading_key", "cplayback"))
            r.append(":%s to go]" % self.master.client_playback.count())
        if self.master.server_playback:
            r.append("[")
            r.append(("heading_key", "splayback"))
            if self.master.nopop:
                r.append(":%s in file]" % self.master.server_playback.count())
            else:
                r.append(":%s to go]" % self.master.server_playback.count())
        if self.master.get_ignore_filter():
            r.append("[")
            r.append(("heading_key", "I"))
            r.append("gnore:%d]" % len(self.master.get_ignore_filter()))
        if self.master.get_tcp_filter():
            r.append("[")
            r.append(("heading_key", "T"))
            r.append("CP:%d]" % len(self.master.get_tcp_filter()))
        if self.master.state.intercept_txt:
            r.append("[")
            r.append(("heading_key", "i"))
            r.append(":%s]" % self.master.state.intercept_txt)
        if self.master.state.limit_txt:
            r.append("[")
            r.append(("heading_key", "l"))
            r.append(":%s]" % self.master.state.limit_txt)
        if self.master.stickycookie_txt:
            r.append("[")
            r.append(("heading_key", "t"))
            r.append(":%s]" % self.master.stickycookie_txt)
        if self.master.stickyauth_txt:
            r.append("[")
            r.append(("heading_key", "u"))
            r.append(":%s]" % self.master.stickyauth_txt)
        if self.master.state.default_body_view.name != "Auto":
            r.append("[")
            r.append(("heading_key", "M"))
            r.append(":%s]" % self.master.state.default_body_view.name)
        # Boolean options are collapsed into one colon-separated bracket.
        opts = []
        if self.master.anticache:
            opts.append("anticache")
        if self.master.anticomp:
            opts.append("anticomp")
        if self.master.showhost:
            opts.append("showhost")
        if not self.master.refresh_server_playback:
            opts.append("norefresh")
        if self.master.killextra:
            opts.append("killextra")
        if self.master.server.config.no_upstream_cert:
            opts.append("no-upstream-cert")
        if self.master.state.follow_focus:
            opts.append("following")
        if self.master.stream_large_bodies:
            opts.append(
                "stream:%s" % netlib.utils.pretty_size(
                    self.master.stream_large_bodies.max_size
                )
            )
        if opts:
            r.append("[%s]" % (":".join(opts)))
        if self.master.server.config.mode in ["reverse", "upstream"]:
            dst = self.master.server.config.upstream_server
            r.append("[dest:%s]" % netlib.utils.unparse_url(
                dst.scheme,
                dst.address.host,
                dst.address.port
            ))
        if self.master.scripts:
            r.append("[")
            r.append(("heading_key", "s"))
            r.append("cripts:%s]" % len(self.master.scripts))
        # r.append("[lt:%0.3f]"%self.master.looptime)
        if self.master.stream:
            r.append("[W:%s]" % self.master.stream_path)
        return r
    def redraw(self):
        """Recompute the info row: flow counter, status flags and the
        right-aligned help text / bound address."""
        fc = self.master.state.flow_count()
        if self.master.state.focus is None:
            offset = 0
        else:
            # 1-based position of the focused flow, capped at the count.
            offset = min(self.master.state.focus + 1, fc)
        t = [
            ('heading', ("[%s/%s]" % (offset, fc)).ljust(9))
        ]
        if self.master.server.bound:
            host = self.master.server.address.host
            if host == "0.0.0.0":
                host = "*"
            boundaddr = "[%s:%s]" % (host, self.master.server.address.port)
        else:
            boundaddr = ""
        t.extend(self.get_status())
        status = urwid.AttrWrap(urwid.Columns([
            urwid.Text(t),
            urwid.Text(
                [
                    self.helptext,
                    boundaddr
                ],
                align="right"
            ),
        ]), "heading")
        self.ib._w = status
    def update(self, text):
        """Set new help text and force a screen refresh."""
        self.helptext = text
        self.redraw()
        self.master.loop.draw_screen()
    def selectable(self):
        return True
| |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Edward Mountjoy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from src.probabilisticSeqMatch import sequences_match_prob
from src.probabilisticSeqMatch import base_prob
from src.fastqparser import phred_score_dict
from src.fastqparser import fastqIterator
from src.fastqparser import Fastq
from src.fastqparser import fastqWriter
from src.progressbar import Bar
from operator import itemgetter
from datetime import timedelta
from shutil import rmtree
import glob
import gzip
import sys
import os
#import concurrent.futures as cf
def run(args):
    """Demultiplex the fastqs in <inDir>/multiplexed into per-sample
    fastqs using the index sequences from the sample sheet.

    Returns 0 on success; exits via sys.exit() on configuration errors.
    (A dead, commented-out concurrent.futures variant of the main loop
    was removed from the end of this function.)
    """
    print("Precomputing base probabilities...")
    # Precompute string to phred scores dictionary
    phred_dict = phred_score_dict(args.phredOffset)
    # Precompute base probabilities for phredscores up to 50
    base_prob_precompute = {}
    for letter in phred_dict:
        base_prob_precompute[letter] = base_prob(phred_dict[letter])
    # Convert index qual argument to a qual character
    args.indexQual = chr(args.indexQual + args.phredOffset)

    print("Searching for fastqs...")
    # Check that the multiplexed path exists
    multiplexed_dir = os.path.join(args.inDir, "multiplexed")
    if not os.path.exists(multiplexed_dir):
        sys.exit("Directory '<inDir>/multiplexed' does not exist. Re-run with"
                 " different <inDir>")

    # Create out directory
    out_dir = "demultiplexed"
    if args.uniqID is not None:
        out_dir += "_{0}".format(args.uniqID)
    out_dir = os.path.join(args.inDir, out_dir)
    create_folder(out_dir)

    # Initiate multiplexed class
    multiplexed = Multiplex(multiplexed_dir)

    print("Loading index sequences...")
    # Initiate sample sheet and read possible indexes
    sampleSheet = SampleSheet(args.sampleSheet)
    sampleSheet.parse(args.indexQual, base_prob_precompute)

    # Check that there are the same number of indexes in sample sheet and
    # multiplexed fastqs
    if sampleSheet.is_dualindexed != multiplexed.is_dualindexed:
        sys.exit("Error: Different number of indexes in sampleSheet and "
                 "multiplexed reads. Exiting!")

    print("Initiating...")
    # Open output class for each sample, and a not_assigned group
    sample_out = {}
    for sample in list(sampleSheet.sample_indexes.keys()) + ['not_assigned']:
        sample_out[sample] = Sample(sample, out_dir, multiplexed.is_pairend,
                                    multiplexed.is_dualindexed)

    # Initiate progress bar (4 fastq lines per record)
    num_records = file_len(multiplexed.barcode_paths[0]) / 4
    # NOTE(review): '%(eta)a' looks like a typo for '%(eta)d' — confirm
    # against the progress bar's supported suffix format keys.
    bar = Bar('Demultiplexing', max=int(num_records/10000),
              suffix='%(percent)d%% %(eta)a secs')

    c = 1
    for variables in futures_iterate_reads(base_prob_precompute,
                                           multiplexed, sampleSheet,
                                           args.minProb):
        # Get output
        output = futures_barcode_to_indexes(variables)
        # Unpack output
        ((read_records, barcode_records), sample, prob, _) = output
        # Write record to correct sample file
        sample_out[sample].write(read_records, barcode_records)
        # Update progress
        if c % 10000 == 0:
            bar.next()
        c += 1
    # Close progress bar
    bar.finish()

    # Close all sample handles
    for sample_name in sample_out:
        sample_out[sample_name].close_handles()

    print("Finished!")
    return 0
def futures_iterate_reads(base_prob_precompute, multiplexed, sampleSheet,
                          min_prob):
    """Yield one (combined_record, sampleSheet, min_prob) tuple per
    multiplexed record, packaged for the futures worker function."""
    for rec in multiplexed.iterate(base_prob_precompute):
        yield rec, sampleSheet, min_prob
def futures_barcode_to_indexes(variables):
    """Match one read's barcodes against the sample-sheet indexes.

    Returns (combined_record, sample_name, probability, barcode1_header);
    unmatched reads are assigned to 'not_assigned'.
    """
    combined_record, sampleSheet, min_prob = variables
    _, barcode_records = combined_record
    # Find the best-matching sample for this barcode pair.
    b1_header, sample, prob = match_barcode_to_indexes(
        barcode_records, sampleSheet, min_prob)
    if sample is None:
        sample = 'not_assigned'
    # Record the match probability on the first barcode's header line.
    b1_header = "{0} {1}".format(b1_header, prob)
    combined_record[1][0].id = b1_header
    return combined_record, sample, prob, b1_header
def match_barcode_to_indexes(barcode_records, sampleSheet, min_prob):
    """For the barcode pair, calculate the probability of a match against
    each sample's index set.

    Returns (barcode1_header, best_sample_or_None, best_probability);
    the sample is None when no match exceeds min_prob.
    """
    index_probs = {}
    for sample_name, index_records in sampleSheet.sample_indexes.items():
        # Match probability for the first barcode.
        b1_prob = sequences_match_prob(index_records[0].seq,
                                       index_records[0].qual_prob,
                                       barcode_records[0].seq,
                                       barcode_records[0].qual_prob, 0)
        if sampleSheet.is_dualindexed:
            if b1_prob >= min_prob:
                b2_prob = sequences_match_prob(index_records[1].seq,
                                               index_records[1].qual_prob,
                                               barcode_records[1].seq,
                                               barcode_records[1].qual_prob, 0)
            else:
                # Already below threshold; skip the second comparison.
                b2_prob = b1_prob
            overall_prob = b1_prob * b2_prob
        else:
            overall_prob = b1_prob
        index_probs[sample_name] = overall_prob
    # Rank samples by descending match probability.
    ranked = sorted(index_probs.items(), key=itemgetter(1), reverse=True)
    header = barcode_records[0].id
    top_sample, top_prob = ranked[0]
    if top_prob > min_prob:
        return header, top_sample, top_prob
    return header, None, top_prob
class Sample:
    """One output sample: owns its output directory, read/barcode output
    paths, and lazily opened gzip handles (opened on first write)."""

    def __init__(self, name, out_dir, is_pe, id_dual):
        """Create the sample directory and derive output file paths.

        :param name: sample name (spaces are replaced with underscores)
        :param out_dir: parent output directory
        :param is_pe: True when the run is paired-end (adds an R2 path)
        :param id_dual: True when dual-indexed (adds a barcode_2 path)
        """
        self.read_paths = []
        self.barcode_paths = []
        self.read_handles = None
        self.barcode_handles = None
        # Create directory for sample
        name = name.replace(' ', '_')
        self.sample_dir = os.path.join(out_dir, name)
        create_folder(self.sample_dir)
        # Create read paths
        self.read_paths.append(os.path.join(
            self.sample_dir, '{0}.R1.fastq.gz'.format(name)))
        if is_pe:
            self.read_paths.append(os.path.join(
                self.sample_dir, '{0}.R2.fastq.gz'.format(name)))
        # Create barcode paths
        self.barcode_paths.append(os.path.join(
            self.sample_dir, '{0}.barcode_1.fastq.gz'.format(name)))
        if id_dual:
            self.barcode_paths.append(os.path.join(
                self.sample_dir, '{0}.barcode_2.fastq.gz'.format(name)))

    def open_handles(self):
        """For the reads and barcodes, opens output handles."""
        self.read_handles = [get_handle(path, 'w')
                             for path in self.read_paths]
        self.barcode_handles = [get_handle(path, 'w')
                                for path in self.barcode_paths]
        return 0

    def write(self, read_records, barcode_records):
        """Writes the demultiplexed read and barcode records to the
        sample's output files, opening handles lazily on first use."""
        # Idiom fix: `is None` rather than `== None`.
        if self.read_handles is None:
            self.open_handles()
        # One record per handle, in matching order.
        for record, handle in zip(read_records, self.read_handles):
            fastqWriter(record, handle)
        for record, handle in zip(barcode_records, self.barcode_handles):
            fastqWriter(record, handle)
        return 0

    def close_handles(self):
        """Closes any open handles."""
        if self.read_handles is not None:
            for handle in self.read_handles + self.barcode_handles:
                handle.close()
        return 0
class SampleSheet:
    # Class to hold the sample sheet and retrieve indexes from it.

    def __init__(self, path):
        """Remember the sample sheet path; nothing is read until parse()."""
        self.path = path

    def parse(self, index_qual, base_prob_precompute):
        """ Parses the sample sheet to retrieve the indexes for each sample.

        Sets self.is_dualindexed and self.sample_indexes (sample name ->
        list of Fastq index records). Exits when no [Data] section exists.
        """
        sample_indexes = {}
        with open(self.path, 'r') as in_h:
            # Skip to line after [Data]
            line = in_h.readline()
            while not line.startswith('[Data]'):
                # FIX: readline() returns '' at EOF; without this guard the
                # loop would spin forever when '[Data]' is missing.
                if line == '':
                    sys.exit("Error: no [Data] section found in sample "
                             "sheet {0}".format(self.path))
                line = in_h.readline()
            # Get header
            header = in_h.readline().rstrip().lower().split(',')
            col_ind = dict(zip(header, range(len(header))))
            # Save whether it is dual indexed
            if "index2" in col_ind.keys():
                self.is_dualindexed = True
            else:
                self.is_dualindexed = False
            # Get indexes
            for line in in_h:
                # Break if EOF
                if line.strip() == "":
                    break
                # Get info
                parts = line.rstrip().split(',')
                sample_name = parts[col_ind['sample_name']]
                # If sample_name is empty, take sample_id instead
                if sample_name == "":
                    sample_name = parts[col_ind['sample_id']]
                # Get first index
                index1 = parts[col_ind['index']]
                sample_indexes[sample_name] = [index1]
                # Get second index
                if self.is_dualindexed:
                    index2 = parts[col_ind['index2']]
                    sample_indexes[sample_name].append(index2)
        # Convert indexes to seqIO seqRecords
        self.sample_indexes = self.convert_index_to_fastqRecord(
            sample_indexes, index_qual, base_prob_precompute)
        return 0

    def convert_index_to_fastqRecord(self, sample_indexes, index_qual,
                                     base_prob_precompute):
        """ Converts each index sequence to a seqIO seqRecord.

        Each raw index string is replaced in-place by a Fastq record
        carrying a uniform quality of *index_qual*.
        """
        # For each sample
        for sample in sample_indexes:
            # For each index
            for i in range(len(sample_indexes[sample])):
                raw_seq = sample_indexes[sample][i]
                qual = [index_qual] * len(raw_seq)
                # Convert to fastqRecord
                record = Fastq(None, raw_seq, qual)
                # Calculate base probabilities
                record.qual_to_prob(base_prob_precompute)
                # Save record
                sample_indexes[sample][i] = record
        return sample_indexes
class Multiplex:
    """Wraps the folder of multiplexed read + barcode fastqs and yields
    synchronized record sets."""

    def __init__(self, folder):
        """ Make list of read and barcode files.

        Fastqs whose basename contains 'barcode_' are barcodes; all
        others are reads. Exits unless there are 1-2 of each.
        """
        self.dir = folder
        self.read_paths = []
        self.barcode_paths = []
        for fastq in sorted(glob.glob(os.path.join(folder, "*.fastq*"))):
            if "barcode_" in os.path.split(fastq)[1]:
                self.barcode_paths.append(fastq)
            else:
                self.read_paths.append(fastq)
        # Save whether pairend
        num_reads = len(self.read_paths)
        if num_reads not in (1, 2):
            sys.exit("There must be 1 or 2 input read fastqs, not {0}".format(
                len(self.read_paths)))
        self.is_pairend = (num_reads == 2)
        # Save whether dualindex
        num_barcodes = len(self.barcode_paths)
        if num_barcodes not in (1, 2):
            sys.exit("There must be 1 or 2 input barcode fastqs, not"
                     " {0}".format(len(self.barcode_paths)))
        self.is_dualindexed = (num_barcodes == 2)

    def open_handles(self):
        """ Opens the file names for reading. """
        reads = [get_handle(path, 'r') for path in self.read_paths]
        barcodes = [get_handle(path, 'r') for path in self.barcode_paths]
        return reads, barcodes

    def open_iterators(self, read_handles, barcode_handles):
        """ Opens fastq iterators using biopythons SeqIO """
        return ([fastqIterator(handle) for handle in read_handles],
                [fastqIterator(handle) for handle in barcode_handles])

    def iterate(self, base_prob_precompute):
        """ Loads the reads and barcode fastqs and yields 1 set at a time.
        """
        read_handles, barcode_handles = self.open_handles()
        read_iterators, barcode_iterators = self.open_iterators(
            read_handles, barcode_handles)
        # Walk the first read file; pull matching records from the others.
        for r1_record in read_iterators[0]:
            read_records = [r1_record]
            if self.is_pairend:
                read_records.append(next(read_iterators[1]))
            barcode_records = [next(barcode_iterators[0])]
            if self.is_dualindexed:
                barcode_records.append(next(barcode_iterators[1]))
            # All records of one set must share the same title stem.
            titles = [record.id.split(" ")[0]
                      for record in read_records + barcode_records]
            if len(set(titles)) > 1:
                sys.exit('Reads and/or barcodes are not in sync\n'
                         '{0}'.format(titles))
            # Calculate base probabilities for barcodes
            for barcode in barcode_records:
                barcode.qual_to_prob(base_prob_precompute)
            yield [read_records, barcode_records]
        # Close handles
        for handle in read_handles + barcode_handles:
            handle.close()
def create_folder(folder):
    """ Create *folder*, prompting before overwriting an existing one.

    If the folder already exists the user is asked whether to overwrite;
    confirmation removes the old tree, any other answer exits the
    program. Returns the folder path.
    """
    if os.path.exists(folder):
        response = input('{0} exists. Would you like to overwrite it? [y/n] '.format(folder))
        # Accept 'y'/'Y' with surrounding whitespace as confirmation
        # (previously only a bare lowercase 'y' was accepted).
        if response.strip().lower() == 'y':
            rmtree(folder)
        else:
            sys.exit()
    os.makedirs(folder)
    return folder
def get_handle(filen, rw):
    """ Return an open file handle, transparently using gzip for *.gz.

    Bug fix: on Python 3, ``gzip.open(filen, 'r')`` defaults to BINARY
    mode and yields bytes, while the plain ``open()`` branch yields str.
    Bare modes ('r', 'w', 'a') are therefore promoted to text mode
    ('rt', 'wt', 'at') for gzipped files so both branches behave the
    same. An explicit 'b' or 't' in *rw* is respected unchanged.
    """
    if filen.endswith('.gz'):
        if 'b' not in rw and 't' not in rw:
            rw += 't'
        return gzip.open(filen, rw)
    else:
        return open(filen, rw)
def file_len(fname):
    """ Count the number of lines in a file.

    Returns 0 for an empty file (the previous implementation raised
    NameError because the loop variable was never bound).
    """
    count = 0
    with get_handle(fname, 'r') as handle:
        # enumerate(..., 1) leaves `count` holding the 1-based index of
        # the last line seen.
        for count, _line in enumerate(handle, 1):
            pass
    return count
| |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/MSCommon/vs.py 4043 2009/02/23 09:06:45 scons"
__doc__ = """Module to detect Visual Studio and/or Visual C/C++
"""
import os
import SCons.Errors
import SCons.Util
from SCons.Tool.MSCommon.common import debug, \
read_reg, \
normalize_env, \
get_output, \
parse_output
class VisualStudio:
    """
    An abstract base class for trying to find installed versions of
    Visual Studio.

    Each instance describes one VS release: where its setup batch file
    and IDE executable live (registry key and/or environment variable),
    supplied entirely via keyword arguments at construction time.
    Lookup results are memoized in self._cache.
    """
    def __init__(self, version, **kw):
        # version: the version string, e.g. '9.0' or '8.0Exp'.
        # All other configuration (hkey_root, batch_file, executable_path,
        # supported_arch, ...) is stored directly on the instance.
        self.version = version
        self.__dict__.update(kw)
        self._cache = {}
    #
    def find_batch_file(self):
        """Try to find the Visual Studio or Visual C/C++ batch file.
        Return None if failed or the batch file does not exist.
        """
        pdir = self.get_vc_product_dir()
        if not pdir:
            debug('find_batch_file(): no pdir')
            return None
        # NOTE(review): the path is normalized twice here; the second
        # normpath is redundant but harmless.
        batch_file = os.path.normpath(os.path.join(pdir, self.batch_file))
        batch_file = os.path.normpath(batch_file)
        if not os.path.isfile(batch_file):
            debug('find_batch_file(): %s not on file system' % batch_file)
            return None
        return batch_file
    def find_executable(self):
        """Locate the IDE executable relative to the VC product dir.
        Return None if the product dir or the executable is missing.
        """
        pdir = self.get_vc_product_dir()
        if not pdir:
            debug('find_executable(): no pdir')
            return None
        executable = os.path.join(pdir, self.executable_path)
        executable = os.path.normpath(executable)
        if not os.path.isfile(executable):
            debug('find_executable(): %s not on file system' % executable)
            return None
        return executable
    def find_vc_product_dir(self):
        """Find the Visual C/C++ product directory.

        Tries the registry first (hkey_root + vc_product_dir_key),
        sanity-checking that the directory actually exists on disk; on
        failure falls back to the common-tools environment variable
        (e.g. VS90COMNTOOLS). Returns None when neither source yields
        an existing location.
        """
        if not SCons.Util.can_read_reg:
            debug('find_vc_product_dir(): can not read registry')
            return None
        key = self.hkey_root + '\\' + self.vc_product_dir_key
        try:
            comps = read_reg(key)
        except WindowsError, e:
            # NOTE(review): 'e' is unused; kept for the legacy Python 2
            # except syntax this module is written in.
            debug('find_vc_product_dir(): no registry key %s' % key)
        else:
            if self.batch_file_dir_reg_relpath:
                comps = os.path.join(comps, self.batch_file_dir_reg_relpath)
                comps = os.path.normpath(comps)
            if os.path.exists(comps):
                return comps
            else:
                debug('find_vc_product_dir(): %s not on file system' % comps)
        # Registry lookup failed or pointed at a stale path: fall back
        # to the environment variable.
        d = os.environ.get(self.common_tools_var)
        if not d:
            msg = 'find_vc_product_dir(): no %s variable'
            debug(msg % self.common_tools_var)
            return None
        if not os.path.isdir(d):
            debug('find_vc_product_dir(): %s not on file system' % d)
            return None
        if self.batch_file_dir_env_relpath:
            d = os.path.join(d, self.batch_file_dir_env_relpath)
            d = os.path.normpath(d)
        return d
    #
    # The get_* methods below memoize their find_* counterparts in
    # self._cache; reset() drops the memoized values.
    def get_batch_file(self):
        """Cached wrapper around find_batch_file()."""
        try:
            return self._cache['batch_file']
        except KeyError:
            batch_file = self.find_batch_file()
            self._cache['batch_file'] = batch_file
            return batch_file
    def get_executable(self):
        """Cached wrapper around find_executable()."""
        try:
            return self._cache['executable']
        except KeyError:
            executable = self.find_executable()
            self._cache['executable'] = executable
            return executable
    def get_supported_arch(self):
        """Return the list of supported target architectures."""
        try:
            return self._cache['supported_arch']
        except KeyError:
            # RDEVE: for the time being use hardcoded lists
            # supported_arch = self.find_supported_arch()
            self._cache['supported_arch'] = self.supported_arch
            return self.supported_arch
    def get_vc_product_dir(self):
        """Cached wrapper around find_vc_product_dir()."""
        try:
            return self._cache['vc_product_dir']
        except KeyError:
            vc_product_dir = self.find_vc_product_dir()
            self._cache['vc_product_dir'] = vc_product_dir
            return vc_product_dir
    def reset(self):
        """Forget all cached lookup results."""
        self._cache = {}
# The list of supported Visual Studio versions we know how to detect.
#
# How to look for .bat file ?
# - VS 2008 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
# Software\Microsoft\VCEpress\9.0\Setup\VC\productdir
# * from environmnent variable VS90COMNTOOLS: the path is then ..\..\VC
# relatively to the path given by the variable.
#
# - VS 2008 Express (WoW6432: 32 bits on windows x64):
# Software\Wow6432Node\Microsoft\VCEpress\9.0\Setup\VC\productdir
#
# - VS 2005 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
# Software\Microsoft\VCEpress\8.0\Setup\VC\productdir
# * from environmnent variable VS80COMNTOOLS: the path is then ..\..\VC
# relatively to the path given by the variable.
#
# - VS 2005 Express (WoW6432: 32 bits on windows x64): does not seem to have a
# productdir ?
#
# - VS 2003 .Net (pro edition ? x86):
# * from registry key productdir. The path is then ..\Common7\Tools\
# relatively to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\7.1\Setup\VC\productdir
# * from environmnent variable VS71COMNTOOLS: the path is the full path to
# vsvars32.bat
#
# - VS 98 (VS 6):
# * from registry key productdir. The path is then Bin
# relatively to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\6.0\Setup\VC98\productdir
#
# The first version found in the list is the one used by default if
# there are multiple versions installed. Barring good reasons to
# the contrary, this means we should list versions from most recent
# to oldest. Pro versions get listed before Express versions on the
# assumption that, by default, you'd rather use the version you paid
# good money for in preference to whatever Microsoft makes available
# for free.
#
# If you update this list, update the documentation in Tool/msvs.xml.
# Each entry describes where one Visual Studio release keeps its setup
# batch file and IDE executable; see the discovery notes above.
SupportedVSList = [
    # Visual Studio 2010
    # TODO: find the settings, perhaps from someone with a CTP copy?
    #VisualStudio('TBD',
    #             hkey_root=r'TBD',
    #             common_tools_var='TBD',
    #             batch_file='TBD',
    #             vc_product_dir_key=r'TBD',
    #             batch_file_dir_reg_relpath=None,
    #             batch_file_dir_env_relpath=r'TBD',
    #             executable_path=r'TBD',
    #             default_dirname='TBD',
    #),
    # Visual Studio 2008
    # The batch file we look for is in the VC directory,
    # so the devenv.com executable is up in ..\..\Common7\IDE.
    VisualStudio('9.0',
                 hkey_root=r'Software\Microsoft\VisualStudio\9.0',
                 common_tools_var='VS90COMNTOOLS',
                 batch_file='vcvarsall.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=None,
                 batch_file_dir_env_relpath=r'..\..\VC',
                 executable_path=r'..\Common7\IDE\devenv.com',
                 default_dirname='Microsoft Visual Studio 9',
                 supported_arch=['x86', 'amd64'],
    ),
    # Visual C++ 2008 Express Edition
    # The batch file we look for is in the VC directory,
    # so the VCExpress.exe executable is up in ..\..\Common7\IDE.
    VisualStudio('9.0Exp',
                 hkey_root=r'Software\Microsoft\VisualStudio\9.0',
                 common_tools_var='VS90COMNTOOLS',
                 batch_file='vcvarsall.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=None,
                 batch_file_dir_env_relpath=r'..\..\VC',
                 executable_path=r'..\Common7\IDE\VCExpress.exe',
                 default_dirname='Microsoft Visual Studio 9',
                 supported_arch=['x86'],
    ),
    # Visual Studio 2005
    # The batch file we look for is in the VC directory,
    # so the devenv.com executable is up in ..\..\Common7\IDE.
    VisualStudio('8.0',
                 hkey_root=r'Software\Microsoft\VisualStudio\8.0',
                 common_tools_var='VS80COMNTOOLS',
                 batch_file='vcvarsall.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=None,
                 batch_file_dir_env_relpath=r'..\..\VC',
                 executable_path=r'..\Common7\IDE\devenv.com',
                 default_dirname='Microsoft Visual Studio 8',
                 supported_arch=['x86', 'amd64'],
    ),
    # Visual C++ 2005 Express Edition
    # The batch file we look for is in the VC directory,
    # so the VCExpress.exe executable is up in ..\..\Common7\IDE.
    VisualStudio('8.0Exp',
                 hkey_root=r'Software\Microsoft\VCExpress\8.0',
                 common_tools_var='VS80COMNTOOLS',
                 batch_file='vcvarsall.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=None,
                 batch_file_dir_env_relpath=r'..\..\VC',
                 # The batch file is in the VC directory, so
                 # so the devenv.com executable is next door in ..\IDE.
                 executable_path=r'..\Common7\IDE\VCExpress.exe',
                 default_dirname='Microsoft Visual Studio 8',
                 supported_arch=['x86'],
    ),
    # Visual Studio .NET 2003
    # The batch file we look for is in the Common7\Tools directory,
    # so the devenv.com executable is next door in ..\IDE.
    VisualStudio('7.1',
                 hkey_root=r'Software\Microsoft\VisualStudio\7.1',
                 common_tools_var='VS71COMNTOOLS',
                 batch_file='vsvars32.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=r'..\Common7\Tools',
                 batch_file_dir_env_relpath=None,
                 executable_path=r'..\IDE\devenv.com',
                 default_dirname='Microsoft Visual Studio .NET',
                 supported_arch=['x86'],
    ),
    # Visual Studio .NET
    # The batch file we look for is in the Common7\Tools directory,
    # so the devenv.com executable is next door in ..\IDE.
    VisualStudio('7.0',
                 hkey_root=r'Software\Microsoft\VisualStudio\7.0',
                 common_tools_var='VS70COMNTOOLS',
                 batch_file='vsvars32.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=r'..\Common7\Tools',
                 batch_file_dir_env_relpath=None,
                 executable_path=r'..\IDE\devenv.com',
                 default_dirname='Microsoft Visual Studio .NET',
                 supported_arch=['x86'],
    ),
    # Visual Studio 6.0
    VisualStudio('6.0',
                 hkey_root=r'Software\Microsoft\VisualStudio\6.0',
                 common_tools_var='VS60COMNTOOLS',
                 batch_file='vcvars32.bat',
                 # NOTE(review): not a raw string, but '\M' and '\P' are
                 # not recognized escapes, so the value is unchanged.
                 vc_product_dir_key='Setup\Microsoft Visual C++\ProductDir',
                 batch_file_dir_reg_relpath='Bin',
                 batch_file_dir_env_relpath=None,
                 executable_path=r'Common\MSDev98\Bin\MSDEV.COM',
                 default_dirname='Microsoft Visual Studio',
                 supported_arch=['x86'],
    ),
]
# Map version string ('9.0', '8.0Exp', ...) -> VisualStudio entry.
SupportedVSMap = {}
for vs in SupportedVSList:
    SupportedVSMap[vs.version] = vs
# Finding installed versions of Visual Studio isn't cheap, because it
# goes not only to the registry but also to the disk to sanity-check
# that there is, in fact, a Visual Studio directory there and that the
# registry entry isn't just stale. Find this information once, when
# requested, and cache it.
# Lazily populated by get_installed_visual_studios(); cleared by
# reset_installed_visual_studios().
InstalledVSList = None
InstalledVSMap = None
def get_installed_visual_studios():
    """Return the list of Visual Studio versions found on this machine.

    The result is computed once and memoized in the module-level
    InstalledVSList / InstalledVSMap globals, since probing the registry
    and the file system is expensive.
    """
    global InstalledVSList
    global InstalledVSMap
    if InstalledVSList is not None:
        return InstalledVSList
    InstalledVSList = []
    InstalledVSMap = {}
    for candidate in SupportedVSList:
        debug('trying to find VS %s' % candidate.version)
        # A findable executable is our proof the version is installed.
        if candidate.get_executable():
            debug('found VS %s' % candidate.version)
            InstalledVSList.append(candidate)
            InstalledVSMap[candidate.version] = candidate
    return InstalledVSList
def reset_installed_visual_studios():
    """Drop all cached detection results, module-level and per-version."""
    global InstalledVSList
    global InstalledVSMap
    InstalledVSList = None
    InstalledVSMap = None
    # Also clear each version's private memoization cache.
    for candidate in SupportedVSList:
        candidate.reset()
# We may be asked to update multiple construction environments with
# SDK information. When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories. Since going to disk
# is expensive, cache results by directory.
#SDKEnvironmentUpdates = {}
#
#def set_sdk_by_directory(env, sdk_dir):
# global SDKEnvironmentUpdates
# try:
# env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
# except KeyError:
# env_tuple_list = []
# SDKEnvironmentUpdates[sdk_dir] = env_tuple_list
#
# include_path = os.path.join(sdk_dir, 'include')
# mfc_path = os.path.join(include_path, 'mfc')
# atl_path = os.path.join(include_path, 'atl')
#
# if os.path.exists(mfc_path):
# env_tuple_list.append(('INCLUDE', mfc_path))
# if os.path.exists(atl_path):
# env_tuple_list.append(('INCLUDE', atl_path))
# env_tuple_list.append(('INCLUDE', include_path))
#
# env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))
#
# for variable, directory in env_tuple_list:
# env.PrependENVPath(variable, directory)
def detect_msvs():
    """Return True if at least one supported Visual Studio is installed."""
    return bool(get_installed_visual_studios())
def get_vs_by_version(msvs):
    """Return the installed VisualStudio object for version string *msvs*,
    or None if that (supported) version is not actually installed.

    Raises SCons.Errors.UserError for version strings this module does
    not know how to detect at all.
    """
    if not SupportedVSMap.has_key(msvs):
        msg = "Visual Studio version %s is not supported" % repr(msvs)
        raise SCons.Errors.UserError, msg
    # Ensure the InstalledVSMap cache has been populated.
    get_installed_visual_studios()
    vs = InstalledVSMap.get(msvs)
    # Some check like this would let us provide a useful error message
    # if they try to set a Visual Studio version that's not installed.
    # However, we also want to be able to run tests (like the unit
    # tests) on systems that don't, or won't ever, have it installed.
    # It might be worth resurrecting this, with some configurable
    # setting that the tests can use to bypass the check.
    #if not vs:
    #    msg = "Visual Studio version %s is not installed" % repr(msvs)
    #    raise SCons.Errors.UserError, msg
    return vs
def get_default_version(env):
    """Returns the default version string to use for MSVS.
    If no version was requested by the user through the MSVS environment
    variable, query all the available visual studios through
    query_versions, and take the highest one.

    Also records the chosen version in env['MSVS']['VERSION'] and
    env['MSVS_VERSION'] as a side effect.

    Return
    ------
    version: str
        the default version.
    """
    if not env.has_key('MSVS') or not SCons.Util.is_Dict(env['MSVS']):
        # No (valid) user request: offer every installed version.
        # TODO(1.5):
        #versions = [vs.version for vs in get_installed_visual_studios()]
        versions = map(lambda vs: vs.version, get_installed_visual_studios())
        env['MSVS'] = {'VERSIONS' : versions}
    else:
        versions = env['MSVS'].get('VERSIONS', [])
    if not env.has_key('MSVS_VERSION'):
        if versions:
            env['MSVS_VERSION'] = versions[0] #use highest version by default
        else:
            # Nothing installed: fall back to the newest version we know
            # how to detect so later lookups still have a value.
            env['MSVS_VERSION'] = SupportedVSList[0].version
    env['MSVS']['VERSION'] = env['MSVS_VERSION']
    return env['MSVS_VERSION']
def get_default_arch(env):
    """Return the default arch to use for MSVS.
    If no arch was requested by the user through the MSVS_ARCH environment
    variable, select x86.

    Raises SCons.Errors.UserError when the requested arch is not
    supported by the selected (installed) Visual Studio version.

    Return
    ------
    arch: str
    """
    arch = env.get('MSVS_ARCH', 'x86')
    # NOTE(review): assumes env['MSVS_VERSION'] was already set (e.g. by
    # get_default_version) -- a KeyError would occur here otherwise.
    msvs = InstalledVSMap.get(env['MSVS_VERSION'])
    if not msvs:
        # Version not actually installed: silently fall back to x86.
        arch = 'x86'
    elif not arch in msvs.get_supported_arch():
        fmt = "Visual Studio version %s does not support architecture %s"
        raise SCons.Errors.UserError, fmt % (env['MSVS_VERSION'], arch)
    return arch
def merge_default_version(env):
    """Set up env['ENV'] for the default Visual Studio version and arch.

    Runs the version's batch file and prepends the resulting LIB,
    LIBPATH, PATH and INCLUDE values onto the construction environment.
    Does nothing when the version is not installed.
    """
    version = get_default_version(env)
    arch = get_default_arch(env)
    msvs = get_vs_by_version(version)
    if msvs is None:
        return
    batfilename = msvs.get_batch_file()
    # XXX: I think this is broken. This will silently set a bogus tool instead
    # of failing, but there is no other way with the current scons tool
    # framework
    if batfilename is not None:
        wanted_vars = ('LIB', 'LIBPATH', 'PATH', 'INCLUDE')
        installed = get_installed_visual_studios()
        vscommonvarnames = [one_vs.common_tools_var for one_vs in installed]
        # Normalize the shell environment the batch file will run under.
        nenv = normalize_env(env['ENV'], vscommonvarnames + ['COMSPEC'])
        output = get_output(batfilename, arch, env=nenv)
        parsed_vars = parse_output(output, wanted_vars)
        for k, v in parsed_vars.items():
            env.PrependENVPath(k, v, delete_existing=1)
def query_versions():
    """Query the system to get available versions of VS. A version is
    considered when a batfile is found."""
    return [one_vs.version for one_vs in get_installed_visual_studios()]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.