from sixquiprend.models.card import Card
from sixquiprend.models.chosen_card import ChosenCard
from sixquiprend.models.column import Column
from sixquiprend.models.hand import Hand
from sixquiprend.models.heap import Heap
from sixquiprend.models.six_qui_prend_exception import SixQuiPrendException
from sixquiprend.models.user import User
from sixquiprend.sixquiprend import app, db
import random
class Game(db.Model):
STATUS_CREATED = 0
STATUS_STARTED = 1
STATUS_FINISHED = 2
id = db.Column(db.Integer, primary_key=True)
status = db.Column(db.Integer, nullable=False, default=STATUS_CREATED)
is_resolving_turn = db.Column(db.Boolean, default=False)
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
hands = db.relationship('Hand', backref='game', lazy='dynamic',
cascade="all, delete, delete-orphan")
heaps = db.relationship('Heap', backref='game', lazy='dynamic',
cascade="all, delete, delete-orphan")
columns = db.relationship('Column', backref='game', lazy='dynamic',
cascade="all, delete, delete-orphan")
chosen_cards = db.relationship('ChosenCard', backref='game', lazy='dynamic',
cascade="all, delete, delete-orphan")
################################################################################
## Getters
################################################################################
@staticmethod
def find(game_id):
game = Game.query.get(game_id)
if not game:
raise SixQuiPrendException('Game doesn\'t exist', 404)
return game
def find_user(self, user_id):
user = self.users.filter(User.id==user_id).first()
if not user:
raise SixQuiPrendException('User not in game', 404)
return user
def find_column(self, column_id):
column = self.columns.filter(Column.id == column_id).first()
if not column:
raise SixQuiPrendException('Column not found', 404)
return column
def find_chosen_card(self, user_id):
user = self.find_user(user_id)
chosen_card = self.chosen_cards.filter(ChosenCard.user_id ==
user_id).first()
if not chosen_card:
raise SixQuiPrendException('Chosen card not found', 404)
return chosen_card
def get_user_hand(self, user_id):
user = self.find_user(user_id)
hand = self.hands.filter(Hand.user_id == user.id).first()
return hand
def get_user_heap(self, user_id):
user = self.find_user(user_id)
heap = self.heaps.filter(Heap.user_id == user_id).first()
return heap
def get_user_status(self, user_id):
user = self.find_user(user_id)
user_dict = user.serialize()
user_dict['has_chosen_card'] = self.get_user_chosen_card(user_id) is not None
user_dict['needs_to_choose_column'] = self.user_needs_to_choose_column(user_id)
return user_dict
def get_user_chosen_card(self, user_id):
self.find_user(user_id)
return self.chosen_cards.filter(ChosenCard.user_id == user_id).first()
def check_is_started(self):
if self.status != Game.STATUS_STARTED:
raise SixQuiPrendException('Game not started', 400)
def check_is_owner(self, user_id):
self.find_user(user_id)
if self.owner_id != user_id:
raise SixQuiPrendException('User is not game owner', 403)
def get_results(self):
results = {}
if self.status == Game.STATUS_CREATED:
return results
for user in self.users.all():
user_game_heap = self.get_user_heap(user.id)
results[user.username] = user_game_heap.get_value()
return results
def get_lowest_value_column(self):
column_value = 9000
for column in self.columns:
tmp_column_value = column.get_value()
if tmp_column_value < column_value:
lowest_value_column = column
column_value = tmp_column_value
elif tmp_column_value == column_value and random.random() > 0.5:
lowest_value_column = column
return lowest_value_column
def get_suitable_column(self, chosen_card):
if chosen_card.game_id != self.id:
raise SixQuiPrendException('Chosen card does not belong to the game',
422)
diff = 9000
chosen_column = None
for column in self.columns:
last_card = sorted(column.cards, key=lambda card: card.number)[-1]
diff_temp = chosen_card.card.number - last_card.number
if diff_temp > 0 and diff_temp < diff:
chosen_column = column
diff = diff_temp
if chosen_column is None:
raise SixQuiPrendException('User ' + str(chosen_card.user_id) + ' must choose a column', 422)
return chosen_column
def get_available_bots(self):
bots = User.query.filter(User.urole == User.ROLE_BOT).order_by(User.id).all()
available_bots = []
for bot in bots:
if bot not in self.users.all():
available_bots.append(bot)
return available_bots
def get_chosen_cards_for_current_user(self, current_user_id):
self.find_user(current_user_id)
if not self.is_resolving_turn:
chosen_cards = self.chosen_cards.filter(ChosenCard.user_id == current_user_id)
if chosen_cards.count() == 0:
raise SixQuiPrendException('You haven\'t chosen a card', 400)
return self.chosen_cards.all()
def user_needs_to_choose_column(self, user_id):
self.check_is_started()
if not self.is_resolving_turn:
return False
user = self.find_user(user_id)
if self.get_user_chosen_card(user_id) is None:
return False
cc = self.get_user_chosen_card(user_id)
try:
self.get_suitable_column(cc)
except SixQuiPrendException:
lower_chosen_card_count = self.chosen_cards.join(Card) \
.filter(Card.number < cc.card.number) \
.count()
return lower_chosen_card_count == 0
return False
def can_place_card(self, current_user_id):
self.check_is_owner(current_user_id)
if self.status != Game.STATUS_STARTED:
return False
if self.is_resolving_turn:
chosen_card = self.chosen_cards.join(Card).order_by(Card.number.asc()).first()
if chosen_card.user.urole != User.ROLE_BOT:
if self.user_needs_to_choose_column(chosen_card.user_id):
return False
return True
if self.chosen_cards.count() == self.users.count():
return True
return False
def can_choose_cards_for_bots(self, current_user_id):
if self.can_place_card(current_user_id):
return False
for bot in self.users.filter(User.urole == User.ROLE_BOT).all():
if self.get_user_chosen_card(bot.id) is None:
return True
return False
################################################################################
## Actions
################################################################################
@staticmethod
def create(user):
game = Game(status=Game.STATUS_CREATED)
game.users.append(user)
game.owner_id = user.id
db.session.add(game)
db.session.commit()
return game
@staticmethod
def delete(game_id):
game = Game.find(game_id)
db.session.delete(game)
db.session.commit()
def setup(self, current_user_id):
self.check_is_owner(current_user_id)
if self.status != Game.STATUS_CREATED:
raise SixQuiPrendException('Can only start a created game', 400)
if self.users.count() < 2:
raise SixQuiPrendException('Cannot start game with less than 2 players', 400)
self.status = Game.STATUS_STARTED
card_set = list(range(1, app.config['MAX_CARD_NUMBER'] + 1))
for user in self.users.all():
user_hand = Hand(game_id=self.id, user_id=user.id)
for i in range(app.config['HAND_SIZE']):
index = random.randrange(len(card_set))
card_number = card_set.pop(index)
card = Card.query.filter(Card.number == card_number).first()
user_hand.cards.append(card)
db.session.add(user_hand)
user_heap = Heap(game_id=self.id, user_id=user.id)
db.session.add(user_heap)
for i in range(app.config['BOARD_SIZE']):
column = Column(game_id=self.id)
index = random.randrange(len(card_set))
card_number = card_set.pop(index)
card = Card.query.filter(Card.number == card_number).first()
column.cards.append(card)
db.session.add(column)
db.session.add(self)
db.session.commit()
def add_user(self, user):
if self.status != Game.STATUS_CREATED:
raise SixQuiPrendException('Cannot enter an already started game', 400)
if self.users.count() == app.config['MAX_PLAYER_NUMBER']:
max_number = str(app.config['MAX_PLAYER_NUMBER'])
error = 'Game already has ' + max_number + ' players'
raise SixQuiPrendException(error, 400)
if user in self.users.all():
raise SixQuiPrendException('Cannot enter twice in a game', 400)
self.users.append(user)
db.session.add(self)
db.session.commit()
def add_bot(self, bot_id, current_user_id):
self.check_is_owner(current_user_id)
bot = User.find(bot_id)
if bot.get_urole() != User.ROLE_BOT:
raise SixQuiPrendException('Can only add a bot', 400)
if bot in self.users.all():
raise SixQuiPrendException('Bot already in game', 400)
self.add_user(bot)
def remove_user(self, user):
if user not in self.users.all():
raise SixQuiPrendException('Not in game', 400)
if user.is_game_owner(self):
self.remove_owner(user.id)
if self.get_user_hand(user.id):
db.session.delete(self.get_user_hand(user.id))
if self.get_user_heap(user.id):
db.session.delete(self.get_user_heap(user.id))
if self.get_user_chosen_card(user.id):
db.session.delete(self.get_user_chosen_card(user.id))
self.users.remove(user)
db.session.add(self)
db.session.commit()
def remove_owner(self, user_id):
self.check_is_owner(user_id)
new_owner = self.users.filter(User.id != user_id,
User.urole != User.ROLE_BOT).first()
if not new_owner:
raise SixQuiPrendException('There is no other non-bot player', 400)
else:
self.owner_id = new_owner.id
db.session.add(self)
db.session.commit()
def place_card(self, current_user_id):
self.check_is_started()
if not self.can_place_card(current_user_id):
raise SixQuiPrendException('Cannot place a card right now', 422)
chosen_card = self.chosen_cards.join(Card).order_by(Card.number.asc()).first()
user_game_heap = self.get_user_heap(chosen_card.user_id)
try:
chosen_column = self.get_suitable_column(chosen_card)
if len(chosen_column.cards) == app.config['COLUMN_CARD_SIZE']:
user_game_heap.cards += chosen_column.cards
db.session.add(user_game_heap)
chosen_column.cards = []
chosen_column.cards.append(chosen_card.card)
db.session.add(chosen_column)
db.session.delete(chosen_card)
db.session.commit()
except SixQuiPrendException as e:
if chosen_card.user.urole == User.ROLE_BOT:
chosen_column = self.get_lowest_value_column()
chosen_column.replace_by_card(chosen_card)
else:
raise e
self.update_status()
return [chosen_column, user_game_heap]
def choose_cards_for_bots(self, current_user_id):
self.check_is_owner(current_user_id)
self.check_is_started()
if self.is_resolving_turn:
raise SixQuiPrendException('Cannot choose cards for bots while card is being placed',
400)
if not self.can_choose_cards_for_bots(current_user_id):
raise SixQuiPrendException('Bots have already chosen cards', 400)
for bot in self.users.filter(User.urole == User.ROLE_BOT).order_by(User.id).all():
if self.get_user_chosen_card(bot.id) is None:
self.choose_card_for_user(bot.id)
db.session.add(self)
db.session.commit()
def choose_card_for_user(self, user_id, card_id=None):
self.check_is_started()
user = self.find_user(user_id)
if self.is_resolving_turn:
raise SixQuiPrendException('Cannot choose a card while resolving a turn', 400)
if self.get_user_chosen_card(user_id):
raise SixQuiPrendException('User has already chosen a card', 400)
hand = self.get_user_hand(user_id)
if card_id is None:
index = random.randrange(len(hand.cards))
card = hand.cards.pop(index)
else:
filtered_cards = [card for card in hand.cards if card.id == card_id]
if len(filtered_cards) == 0:
raise SixQuiPrendException('Card not owned', 400)
else:
card = filtered_cards[0]
hand.cards.remove(card)
db.session.add(hand)
chosen_card = ChosenCard(game_id=self.id, user_id=user_id, card_id=card.id)
db.session.add(chosen_card)
if self.chosen_cards.count() == self.users.count():
self.is_resolving_turn = True
db.session.add(self)
db.session.commit()
return chosen_card
def choose_column_for_user(self, user_id, column_id):
self.check_is_started()
user = self.find_user(user_id)
chosen_column = self.find_column(column_id)
chosen_card = self.find_chosen_card(user_id)
user_heap = chosen_column.replace_by_card(chosen_card)
return [chosen_column, user_heap]
def update_status(self):
self.check_is_started()
if self.chosen_cards.count() > 0:
return
else:
self.is_resolving_turn = False
db.session.add(self)
db.session.commit()
for user in self.users:
if len(self.get_user_hand(user.id).cards) > 0:
return
self.status = Game.STATUS_FINISHED
db.session.add(self)
db.session.commit()
################################################################################
## Serializer
################################################################################
def serialize(self):
return {
'id': self.id,
'users': self.users.all(),
'owner_id': self.owner_id,
'status': self.status,
'is_resolving_turn': self.is_resolving_turn
}
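# Hedged usage sketch, not part of the original model: it assumes a Flask app
# context with the database initialised and two existing User rows, `owner`
# and `bot` (where bot.urole == User.ROLE_BOT). It only strings together the
# lifecycle methods defined above to show their intended call order.
def _example_game_flow(owner, bot):
    """Walk a game from creation through one fully resolved turn."""
    game = Game.create(owner)               # owner becomes the game owner
    game.add_bot(bot.id, owner.id)          # only the owner may add bots
    game.setup(owner.id)                    # deals hands and seeds the columns
    game.choose_card_for_user(owner.id)     # owner picks a random card
    game.choose_cards_for_bots(owner.id)    # bots pick; turn starts resolving
    while game.can_place_card(owner.id):
        game.place_card(owner.id)           # lowest chosen card goes first
    return game.get_results()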

# ============================================================================

# Blender import system clutter
import bpy
from bpy.types import GPencilFrame
import bmesh
from mathutils import Vector
import math
from math import sin, cos, pi
import numpy as np
import sys
from pathlib import Path
UTILS_PATH = Path.home() / "Documents/python_workspace/data-science-learning"
sys.path.append(str(UTILS_PATH))
import importlib
import ds_utils.blender_utils
importlib.reload(ds_utils.blender_utils)
from ds_utils.blender_utils import *
###################################
### Static
###################################
gp_layer = init_grease_pencil(clear_layer=True, gpencil_layer_name='static')
gp_frame = gp_layer.frames.new(0)
# draw_line(gp_frame, (0, 0, 0), (1, 1, 0))
# draw_square(gp_frame, (0, 0, 0), 10)
# draw_circle(gp_frame, (0, 0, 0), 2, 32)
# draw_cube(gp_frame, (3, 3, 3), 1)
# draw_sphere(gp_frame, (1, 1, 1), 3, 32)
NUM_FRAMES = 30
FRAMES_SPACING = 1
bpy.context.scene.frame_start = 0
bpy.context.scene.frame_end = NUM_FRAMES*FRAMES_SPACING
def draw_wave_clock(gp_frame: GPencilFrame, nb_circles: int, center: tuple):
for i in range(nb_circles):
radius = np.random.randint(0, 5) + np.random.rand()
for axis in ['x', 'y', 'z']:
angle = np.random.randint(0, 3) + np.random.rand()
steps = np.random.randint(0, 10)
for j in range(steps):
circle = draw_circle(gp_frame, (0, 0, 0), radius, 64, material_index=j)
rotate_stroke(circle, (angle/steps)*j, axis=axis)
translate_stroke(circle, center)
#draw_wave_clock(gp_frame, 10, (0, 0, 0))
def squares_grid(gp_frame: GPencilFrame, nb_rows: int, nb_cols: int,
rand_size=False, rand_rotation=False, material_index=0):
for x in range(nb_cols):
for y in range(nb_rows):
center = (x, y, 0)
if rand_size:
radius = (x % (nb_cols/2) * y % (nb_rows/2))/((nb_cols/2)*(nb_rows/2)) + np.random.rand()/2
else:
radius = 1
gp_stroke = draw_square(gp_frame, center, radius, material_index=material_index)
draw_cube(gp_frame, center, radius)
if rand_rotation:
rotate_stroke(gp_stroke, np.random.rand())
#squares_grid(gp_frame, 10, 15, rand_size=True, rand_rotation=False, material_index=1)
def polygon_stairs(gp_frame, center: tuple, polygon_sides: int, side_len: float, nb_steps: int,
rotation_angle=0.5, step_size=0.5):
for step in range(nb_steps):
# draw polygon
stroke = draw_circle(gp_frame, (0, 0, step*step_size), side_len-0.1*step, polygon_sides, step)
# rotate polygon
rotate_stroke(stroke, rotation_angle * step)
translate_stroke(stroke, np.array(center))
#for i in range(10):
# for j in range(10):
# polygon_stairs(gp_frame, (i*7, j*7, 0), i+1, 3, 3*(j+1), rotation_angle=0.2, step_size=1)
###################################
### Animations
###################################
gp_layer = init_grease_pencil(clear_layer=True, gpencil_layer_name='anim')
NUM_FRAMES = 100
FRAMES_SPACING = 1
bpy.context.scene.frame_start = 0
bpy.context.scene.frame_end = NUM_FRAMES*FRAMES_SPACING
def draw_multiple_circles_animated(gp_layer):
for frame in range(20):
gp_frame = gp_layer.frames.new(frame)
for i in range(15):
radius = np.random.randint(1, 7) + np.random.rand()
draw_circle(gp_frame, (0, 0, 0), radius, 80)
#draw_multiple_circles_animated(gp_layer)
def kinetic_rotation_polygon(gp_layer, center: tuple, nb_polygons: int, nb_sides: int,
min_radius: float, max_radius: float,
nb_frames: int):
radiuses = np.linspace(min_radius, max_radius, nb_polygons)
#radiuses = np.random.rand(nb_polygons)*(max_radius - min_radius) + min_radius # randomized radiuses
main_angle = (2*pi)/nb_frames
# Animate polygons across frames
for frame in range(nb_frames):
gp_frame = gp_layer.frames.new(frame)
for i in range(nb_polygons):
polygon = draw_circle(gp_frame, (0, 0, 0), radiuses[i], nb_sides, i)
#cur_angle = ((len(radiuses) - i) * (2 * pi)) / nb_frames
cur_angle = ((len(radiuses) - i) // 2 * (2 * pi)) / nb_frames
for axis in ['x']:
rotate_stroke(polygon, cur_angle*frame, axis=axis)
translate_stroke(polygon, center)
#kinetic_rotation_polygon(gp_layer, (0, 0, 0), nb_polygons=20, nb_sides=4, min_radius=3, max_radius=10,
# nb_frames=NUM_FRAMES)
def animate_square_sliding(gp_layer):
main_size = 4
positions = np.linspace(-main_size / 2, main_size / 2, num=NUM_FRAMES)
for frame in range(1, NUM_FRAMES):
gp_frame = gp_layer.frames.new(frame*FRAMES_SPACING)
_ = draw_square(gp_frame, (0, 0, 0), main_size)
draw_square(gp_frame, (main_size/2+0.5, positions[frame], 0), 1)
#animate_square_sliding(gp_layer)
def _get_midpoint(p0: tuple, p1: tuple):
return (p0[0] + p1[0]) / 2, (p0[1] + p1[1]) / 2, (p0[2] + p1[2]) / 2
def polygon_recursive(gp_frame, polygon, step=0, max_steps=3):
# Init new stroke
gp_stroke = gp_frame.strokes.new()
gp_stroke.display_mode = '3DSPACE' # allows for editing
gp_stroke.draw_cyclic = True # closes the stroke
gp_stroke.material_index = step
# Define stroke geometry
gp_stroke.points.add(count=len(polygon))
for i, p in enumerate(polygon):
gp_stroke.points[i].co = p
if step >= max_steps:
return
else:
new_polygon = []
midpoints = []
for i in range(len(polygon)):
p0 = polygon[i]
p1 = polygon[0] if i == len(polygon)-1 else polygon[i+1]
opposite_point = (0, 0, 0)
midpoint = _get_midpoint(p0, p1)
new_point = _get_midpoint(opposite_point, midpoint)
for _ in range(step):
new_point = _get_midpoint(new_point, midpoint)
new_polygon.append(new_point)
midpoints.append(midpoint)
polygon_recursive(gp_frame, new_polygon, step+1, max_steps)
for i in range(len(polygon)):
other_polygon = [polygon[i], midpoints[i-1], new_polygon[i-1], new_polygon[i], midpoints[i]]
polygon_recursive(gp_frame, other_polygon, step + 1, max_steps)
def polygon_recursive_2(gp_layer, center, radius, sides, step=0, max_steps=3):
# Reuse an existing frame for this recursion step or create a new one
if len(gp_layer.frames) > step:
gp_frame = gp_layer.frames[step]
else:
gp_frame = gp_layer.frames.new(step)
#gp_frame = gp_layer.frames.new(0)
draw_sphere(gp_frame, center, radius, 5, step)
cube = draw_circle(gp_frame, center, radius, 5)
if step >= max_steps:
return
else:
polygon_recursive_2(gp_layer, center, radius/2, sides, step+1, max_steps=max_steps)
new_radius = radius/2
for center in cube.points:
polygon_recursive_2(gp_layer, center.co, new_radius/2, sides, step + 1, max_steps=max_steps)
def draw_polygon_fractal(gp_frame, polygon_sides: int):
# Define base polygon
angle = 2*math.pi/polygon_sides # angle in radians
polygon = []
for i in range(polygon_sides):
x = 3*cos(angle*i)
y = 3*sin(angle*i)
z = 3
polygon.append((x, y, z))
polygon_recursive(gp_frame, polygon, max_steps=5)
#draw_polygon_fractal(gp_frame, 6)
#polygon_recursive_2(gp_layer, (0, 0, 0), 10, 4, 0, max_steps=3)
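# Hedged sketch, not in the original script: it only recombines helpers already
# used above (init_grease_pencil, draw_square, rotate_stroke) into a per-frame
# animation; the layer name and square size are illustrative.
def animate_rotating_square(nb_frames=NUM_FRAMES):
    layer = init_grease_pencil(clear_layer=True, gpencil_layer_name='rotating_square')
    for frame in range(nb_frames):
        gp_frame = layer.frames.new(frame * FRAMES_SPACING)
        stroke = draw_square(gp_frame, (0, 0, 0), 2)
        # one full turn around the z axis over the whole animation
        rotate_stroke(stroke, (2 * math.pi / nb_frames) * frame, axis='z')
#animate_rotating_square()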

# ============================================================================

# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v2 import volume_metadata
from cinder.api.v2 import volumes
from cinder import db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
from cinder.volume import api as volume_api
CONF = cfg.CONF
def return_create_volume_metadata_max(context, volume_id, metadata, delete):
return stub_max_volume_metadata()
def return_create_volume_metadata(context, volume_id, metadata, delete):
return stub_volume_metadata()
def return_new_volume_metadata(context, volume_id, metadata, delete):
return stub_new_volume_metadata()
def return_create_volume_metadata_insensitive(context, snapshot_id,
metadata, delete):
return stub_volume_metadata_insensitive()
def return_volume_metadata(context, volume_id):
if not isinstance(volume_id, str) or len(volume_id) != 36:
msg = 'id %s must be a uuid in return volume metadata' % volume_id
raise Exception(msg)
return stub_volume_metadata()
def return_empty_volume_metadata(context, volume_id):
return {}
def return_empty_container_metadata(context, volume_id, metadata, delete):
return {}
def delete_volume_metadata(context, volume_id, key):
pass
def stub_volume_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_new_volume_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_volume_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_max_volume_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_volume(context, volume_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'metadata': {},
'project_id': context.project_id}
def return_volume_nonexistent(*args, **kwargs):
raise exception.VolumeNotFound('bogus test message')
def fake_update_volume_metadata(self, context, volume, diff):
pass
class volumeMetaDataTest(test.TestCase):
def setUp(self):
super(volumeMetaDataTest, self).setUp()
self.volume_api = volume_api.API()
self.stubs.Set(db, 'volume_get', return_volume)
self.stubs.Set(db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(db, 'service_get_all_by_topic',
stubs.stub_service_get_all_by_topic)
self.stubs.Set(self.volume_api, 'update_volume_metadata',
fake_update_volume_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.volume_controller = volumes.VolumeController(self.ext_mgr)
self.controller = volume_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v2/fake/volumes/%s/metadata' % self.req_id
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"metadata": {}}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.volume_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_volume(self):
self.stubs.Set(db, 'volume_metadata_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_volume(self):
self.stubs.Set(db, 'volume_metadata_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(db, 'volume_metadata_delete',
delete_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_volume(self):
self.stubs.Set(db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(db, 'volume_metadata_delete',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank('/v2/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3", }}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
def test_create_with_keys_in_uppercase_and_lowercase(self):
# if the same key is sent in both uppercase and lowercase, the response
# should only contain the variants the server actually added
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v2/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_volume(self):
self.stubs.Set(db, 'volume_get',
return_volume_nonexistent)
self.stubs.Set(db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank('/v2/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
def test_update_all(self):
self.stubs.Set(db, 'volume_metadata_update',
return_new_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_with_keys_in_uppercase_and_lowercase(self):
self.stubs.Set(db, 'volume_metadata_get',
return_create_volume_metadata)
self.stubs.Set(db, 'volume_metadata_update',
return_new_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(db, 'volume_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_volume(self):
self.stubs.Set(db, 'volume_get', return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_volume(self):
self.stubs.Set(db, 'volume_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank('/v2/fake/volumes/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
#test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)

# ============================================================================

# -*- coding: utf-8 -*-
"""
Created on Wed Mar 9 17:03:58 2016
@author: Tobias Jachowski
"""
import persistent
from collections import deque
class Node(persistent.Persistent):
"""
State information via references:
Node implements a directed acyclic graph with depth and breadth first
traversal. Each node has an attribute `cargo`, which can hold any python
object. Node provides methods to get and set nodes (and cargo) based on
different conditions, like level of depth or type of instance.
"""
def __init__(self, cargo=None, **kwargs):
"""
Attributes
----------
cargo : object
The cargo of the Node.
"""
# initialize variables
self._relatives = {'children': [],
'parents': []}
self._max_relatives = {'children': -1,
'parents': -1}
self.cargo = cargo
# self._processed = None
super().__init__(**kwargs)
def add_child(self, child, index=None, after=None, before=None):
"""
Add a child. See `add_relative()` for details.
"""
return self.add_relative(child, child=True, index=index, after=after,
before=before)
def add_parent(self, parent, index=None, after=None, before=None):
"""
Add a parent. See `add_relative()` for details.
"""
return self.add_relative(parent, child=False, index=index, after=after,
before=before)
def add_relative(self, relative, child=True, index=None, after=None,
before=None, bidirectional=True):
"""
Add a `relative` as a child or a parent.
The index of the position of inserting the relative is determined by
the following parameters in descending superseding order: `index`,
`after`, and `before`.
Parameters
----------
relative : Node or cargo
Node to be added as a relative, or cargo with attribute `_node`
referencing a Node.
child : bool, optional
Add a child or a parent.
index : int, optional
after : Node or cargo, optional
before : Node or cargo, optional
bidirectional : bool, optional
Also add `self` to the `relative`, after `relative` has been added
to `self`, i.e. not only reference the `relative` from `self`, but
also reference `self` from `relative`.
Returns
-------
bool
True if `relative` could be added or `relative` is already present.
Otherwise return False.
"""
# Get nodes of relative, after, and before
relative = _get_node(relative)
if after is not None:
after = _get_node(after)
if before is not None:
before = _get_node(before)
# Try to add relative
self_add = self._add_relative(relative, child=child, index=index,
after=after, before=before)
# Try to add self to relative
relation_add = True
if bidirectional:
relation_add = relative._add_relative(self, child=not child)
# check for circular reference
circular_reference = relative.circular_reference(descendants=child)
# Relative could not be added, or self could not be added to relative,
# or circular_reference detected
if not self_add or not relation_add or circular_reference:
# Either relative couldn't be added or
# circular reference occurred -> remove relative
self._remove_relative(relative, child=child)
relative._remove_relative(self, child=not child)
return False
# relative was added or already present
return True
def _add_relative(self, relative, child=True, index=None, after=None,
before=None):
if child:
relatives = self._children
max_relatives = self.max_children
else:
relatives = self._parents
max_relatives = self.max_parents
if max_relatives != -1 and len(relatives) >= max_relatives:
print("Can not add relative. No more relatives allowed!")
return False
if relative is not None:
if relative in relatives:
# relative already referenced; avoid adding it a second time and
# return True
print("Relative %s already referenced." % relative)
return True
# determine the index of self._parents/self._children before which
# the parent/child should be inserted
if index is not None:
# highest priority of index determination, take the given index
pass
elif after in relatives:
# second highest priority (determine index by after)
index = relatives.index(after) + 1
elif before in relatives:
# third highest priority (determine index by before)
index = relatives.index(before)
else:
# default: append parent/child (add parent/child at the end)
index = len(relatives)
# Add parent/child at determined index
relatives.insert(index, relative)
# inform ZODB of change
self._p_changed = True
return True
def remove_child(self, child):
return self.remove_relative(child, child=True)
def remove_parent(self, parent):
return self.remove_relative(parent, child=False)
def remove_relative(self, relative, child=True):
"""
Remove child or parent from self.
"""
relative = _get_node(relative)
self_remove = self._remove_relative(relative, child)
relate_remove = relative._remove_relative(self, child=not child)
# No relative was removed
if not (self_remove or relate_remove):
return False
# relative was removed
return True
def _remove_relative(self, relative, child=True):
if child:
relatives = self._children
else:
relatives = self._parents
# remove child/parent from relatives
if relative in relatives:
relatives.remove(relative)
# inform ZODB of change
self._p_changed = True
return True
return False
def circular_reference(self, descendants=True):
"""
Check for circular references.
"""
# determine direction of search for circular reference
relatives = self.relatives(descendants=descendants)
# Search for a circular reference.
# First, assume there is no circular reference:
# If this Node has no relatives in the chosen direction, there is no
# circular reference and circular stays False
# Check for circular reference via modifications
# for modification in modifications:
# circular = circular or modification.circular_reference(down,
# caller)
# Check for circular reference via relatives
for relative in relatives:
if relative is self:
print("Circular reference detected -> Illegal connection!")
return True
# stop recursion: return result of circular recursive search
return False
def relatives(self, descendants=True, includeself=False, dft=True,
level=-1, cargo=False):
"""
Traverse relatives.
Parameters
----------
descendants : bool
Yield descendants or ancestors
includeself : bool
Yield only relatives or also include self
dft : bool
Use depth first or breadth first traversal
level : int
Yield self (0), up to first generation (1), up to second generation
(2), ...
cargo : bool
Yield node or its cargo
Yields
-------
Node
If `cargo` is False.
object
If `cargo` is True.
Notes
-----
Level describes the generation up to which a relative should be
yielded:
|
level 0 Node
| \
level 1 | Node
| |
level 2 | Node
| / | \
level 1/3 Node Node Node
/ | \
level 2/4 Node Node Node
|
"""
# see http://jeremykun.com/2013/01/22/depth-and-breadth-first-search/
# Initialization ...
toprocess = deque()
if dft:
# reversed iteration for depth first search
iterswitch = reversed
addtoprocess = toprocess.append
else:
iterswitch = iter
addtoprocess = toprocess.appendleft
# start with either self or the relatives
if includeself:
# add self at level 0
toprocess.append((0, self))
elif level == -1 or level >= 1:
# relatives are either children or parents:
if descendants:
relatives = self._children
else:
relatives = self._parents
for n in iterswitch(relatives):
# add relatives at level 1
addtoprocess((1, n))
visited = set()
# better execution time but not thread safe!
# search_time = datetime.now().timestamp()
# process_id = uuid.uuid4()
# search_time = process_id
# better execution time but not thread safe!
# Traversal and processing ...
while len(toprocess) > 0:
current_level, node = toprocess.pop()
# if node._processed != search_time:
if node not in visited:
visited.add(node)
# node._processed = search_time
if cargo and node.cargo is not None:
yield node.cargo
if not cargo:
yield node
if descendants:
relatives = node._children
else:
relatives = node._parents
next_level = current_level + 1
if level == -1 or next_level <= level:
for n in iterswitch(relatives):
# if n._processed != search_time:
if n not in visited:
addtoprocess((next_level, n))
@property
def _children(self):
return self._relatives['children']
@property
def _parents(self):
return self._relatives['parents']
@property
def max_children(self):
return self._max_relatives['children']
@max_children.setter
def max_children(self, max_children):
self._max_relatives['children'] = max_children
@property
def max_parents(self):
return self._max_relatives['parents']
@max_parents.setter
def max_parents(self, max_parents):
self._max_relatives['parents'] = max_parents
@property
def parents(self):
return self.relatives(descendants=False, dft=False, level=1)
@property
def children(self):
return self.relatives(descendants=True, dft=False, level=1)
@property
def ancestors(self):
return self.relatives(descendants=False, dft=False)
@property
def descendants(self):
return self.relatives(descendants=True, dft=False)
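# Hedged usage sketch, not part of the original module: it builds a minimal
# three-node chain and traverses it with the methods defined above. Variable
# names are illustrative only.
def _example_node_traversal():
    root, mid, leaf = Node(cargo='root'), Node(cargo='mid'), Node(cargo='leaf')
    root.add_child(mid)
    mid.add_child(leaf)
    # depth-first descendants of root, yielding cargo: ['mid', 'leaf']
    return list(root.relatives(descendants=True, cargo=True))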
class GraphMember(persistent.Persistent):
"""
GraphMember holds one instance of class Node and offers convenience
functions so that subclasses get Node-like behaviour simply by
inheriting.
It implements methods to inform other members of the graph of a change.
"""
def __init__(self, max_parents=-1, max_children=-1, updated=False,
name=None, group=None, **kwargs):
self._node = Node(cargo=self)
self.max_parents = max_parents
self.max_children = max_children
# Initialize the status of a GraphMember to be not updated by default
self.updated = updated
self.name = name
self.group = group
def members(self, name=None, group=None, instance_class=None,
descendants=True, includeself=True, dft=False, level=-1):
for relative in self._node.relatives(descendants=descendants,
includeself=includeself, dft=dft,
level=level, cargo=True):
if (name is None or relative.name == name) \
and (group is None or relative.group == group) \
and (instance_class is None
or isinstance(relative, instance_class)):
yield relative
def group_ends(self, group, root=None, descendants=True, includeself=True):
for member in self.members(group=group, descendants=descendants,
includeself=includeself, dft=True):
if member.is_end(root=root):
yield member
def group_roots(self, group, descendants=True, includeself=True):
return self.group_ends(group, root=True, descendants=descendants,
includeself=includeself)
def group_leafs(self, group, descendants=True, includeself=True):
return self.group_ends(group, root=False, descendants=descendants,
includeself=includeself)
def group_end(self, group, root=True, descendants=True, includeself=True):
ends = self.group_ends(group, root=root, descendants=descendants,
includeself=includeself)
return next(ends, None)
def group_root(self, group, descendants=True, includeself=True):
return self.group_end(group, root=True, descendants=descendants,
includeself=includeself)
def group_leaf(self, group, descendants=True, includeself=True):
return self.group_end(group, root=False, descendants=descendants,
includeself=includeself)
def is_end(self, root=None):
"""
Parameters
----------
root : None or bool, optional
If root is None, check whether the GraphMember is the root or the
leaf of its own group. If root is True, check whether it is the root
of the group; if root is False, check whether it is a leaf of the group.
"""
if root is None:
return self.is_group_root or self.is_group_leaf
elif root:
return self.is_group_root
else:
return self.is_group_leaf
@property
def is_group_root(self):
parent = self.parent
return parent is None or self.group != parent.group
@property
def is_group_leaf(self):
child = self.child
return child is None or self.group != child.group
def set_changed(self, descendants=True, includeself=True, level=-1,
**kwargs):
"""
Inform all descendants/ancestors (Nodes that have this Node as a
parent/child) about a change, so that the children inform their
children about the change.
Has to be called upon any change of `self` and parents.
It calls `member_changed(ancestor=descendants, **kwargs)` on `self`,
if `includeself` is True, and on all descendants.
Parameters
----------
descendants : bool
Inform descendants or ancestors about change
level : int
Up to which generation the change should be proclaimed
includeself : bool
Should self be informed, too, i.e. should member_changed() be
called?
**kwargs
See member_changed() for parameters
"""
# inform node about change
# the node in turn will call member_changed() method of the cargos
# requested (see set_changed() method of node)
if includeself:
self.member_changed(ancestor=descendants, calledfromself=True,
**kwargs)
# Get either descendants or ancestors to be informed of the change
members = self.members(descendants=descendants, includeself=False,
dft=False, level=level)
for member in members:
member.member_changed(ancestor=descendants, calledfromself=False,
**kwargs)
def member_changed(self, ancestor=True, calledfromself=False,
updated=False, **kwargs):
# `self` triggered the change. Set updated status of `self` according
# to the parameter `updated`.
if calledfromself:
self.updated = updated
# An ancestor triggered the change and `self` is set to be outdated. A
# change of descendants will be ignored.
if not calledfromself and ancestor:
self.updated = False
def add_member(self, member, child=True, index=None, after=None,
before=None, set_changed=True, **kwargs):
# Try to add the new member
added = self._node.add_relative(member, child=child, index=index,
after=after, before=before, **kwargs)
# Inform the child or self and the children about change
if added and set_changed:
if child:
# Inform the child about the addition of a new parent (i.e. the
# addition of self)
member.set_changed()
else:
# inform self and children about addition of a new parent
self.set_changed()
return added
def add_parent(self, parent, index=None, after=None, before=None,
set_changed=True, **kwargs):
# Try to add the new parent
return self.add_member(parent, child=False, index=index, after=after,
before=before, set_changed=set_changed,
**kwargs)
def add_child(self, child, index=None, after=None, before=None,
set_changed=True, **kwargs):
# Try to add the new child
return self.add_member(child, child=True, index=index, after=after,
before=before, set_changed=set_changed,
**kwargs)
def remove_member(self, member, child=True, set_changed=True):
# Try to remove the member
removed = self._node.remove_relative(member, child=child)
# Inform the child or self and the children about change
if removed and set_changed:
if child:
# Inform the child about the loss of a parent (i.e. the loss
# of self)
member.set_changed()
else:
# Inform self and children about loss of a parent
self.set_changed()
return removed
def remove_parent(self, parent, set_changed=True):
"""
Remove parent from self.parents
"""
return self.remove_member(parent, child=False, set_changed=set_changed)
def remove_child(self, child, set_changed=True):
"""
Remove child from self.children
"""
return self.remove_member(child, child=True, set_changed=set_changed)
def set_member(self, member, child=True, set_changed=True):
"""
Replace all children or parents with `member`.
"""
if child:
members = self.children
else:
members = self.parents
# Remove all members
for old_member in members:
self.remove_member(old_member, child=child, set_changed=set_changed)
return self.add_member(member, child=child, set_changed=set_changed)
def set_parent(self, parent, set_changed=True):
"""
Replace all parents with `parent`.
"""
return self.set_member(parent, child=False, set_changed=set_changed)
def set_child(self, child, set_changed=True):
"""
Replace all children with `child`.
"""
return self.set_member(child, child=True, set_changed=set_changed)
def first_ancestor_instance(self, instance_class, dft=True, level=-1):
ancestors = self.members(instance_class=instance_class,
descendants=False, includeself=False, dft=dft,
level=level)
return next(ancestors, None)
def parent_instances(self, instance_class):
members = self.members(instance_class=instance_class,
descendants=False, includeself=False, level=1)
return members
def child_instances(self, instance_class=None):
members = self.members(instance_class=instance_class,
descendants=True, includeself=False, level=1)
return members
@property
def parents(self):
return self.members(descendants=False, includeself=False, level=1)
@property
def children(self):
return self.members(descendants=True, includeself=False, level=1)
@property
def parent(self):
"""
Return first parent or None.
"""
return next(self.parents, None)
@property
def child(self):
"""
Return first child or None.
"""
return next(self.children, None)
@property
def max_parents(self):
return self._node.max_parents
@max_parents.setter
def max_parents(self, max_parents):
self._node.max_parents = max_parents
@property
def max_children(self):
return self._node.max_children
@max_children.setter
def max_children(self, max_children):
self._node.max_children = max_children
def __str__(self):
if self.name is None or self.group is None:
return self.__repr__()
else:
return "".join(["".join(self.name).ljust(22),
"".join(self.group).ljust(18),
"".join(["<", self.__class__.__name__,
">"]).ljust(14)
])
def _get_node(cargo):
node = cargo
if not isinstance(cargo, Node):
try:
node = cargo._node
except AttributeError:
raise AttributeError("The cargo does not have an attribute `_node`"
" of type Node, which is necessary to be used"
" as a cargo from an instance of class Node.")
return node
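# Hedged usage sketch, not part of the original module: it wires two
# GraphMembers together and shows how set_changed() propagates the outdated
# state to descendants. The names and groups are illustrative only.
def _example_member_update_flow():
    source = GraphMember(name='source', group='raw')
    sink = GraphMember(name='sink', group='processed')
    source.add_child(sink)     # informs sink about its new parent
    sink.updated = True
    source.set_changed()       # descendants are marked as not updated
    return sink.updated        # expected: False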

# ============================================================================

"""
This module contains functions to:
- solve a single equation for a single variable, in any domain, either real or complex.
- solve a system of linear equations with N variables and M equations.
"""
from __future__ import print_function, division
from sympy.core.sympify import sympify
from sympy.core import S, Pow, Dummy, pi, Expr, Wild, Mul, Equality
from sympy.core.numbers import I, Number, Rational, oo
from sympy.core.function import (Lambda, expand, expand_complex)
from sympy.core.relational import Eq
from sympy.simplify.simplify import simplify, fraction, trigsimp
from sympy.core.symbol import Symbol
from sympy.functions import (log, Abs, tan, cot, sin, cos, sec, csc, exp,
acos, asin, acsc, asec, arg,
piecewise_fold)
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.functions.elementary.miscellaneous import real_root
from sympy.sets import (FiniteSet, EmptySet, imageset, Interval, Intersection,
Union, ConditionSet)
from sympy.matrices import Matrix
from sympy.polys import (roots, Poly, degree, together, PolynomialError,
RootOf)
from sympy.solvers.solvers import checksol, denoms, unrad
from sympy.solvers.inequalities import solve_univariate_inequality
from sympy.utilities import filldedent
def _invert(f_x, y, x, domain=S.Complexes):
"""
Reduce the complex valued equation ``f(x) = y`` to a set of equations
``{g(x) = h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is
a simpler function than ``f(x)``. The return value is a tuple ``(g(x),
set_h)``, where ``g(x)`` is a function of ``x`` and ``set_h`` is
the set of functions ``{h_1(y), h_2(y), ..., h_n(y)}``.
Here, ``y`` is not necessarily a symbol.
The ``set_h`` contains the functions, along with the information
about the domain in which they are valid, expressed through set
operations. For instance, if ``y = Abs(x) - n`` is inverted
in the real domain, then ``set_h`` doesn't simply return
`{-n, n}`, as the nature of `n` is unknown; rather, it returns
`Intersection([0, oo), {n}) U Intersection((-oo, 0], {-n})`
By default, the complex domain is used but note that inverting even
seemingly simple functions like ``exp(x)`` can give very different
result in the complex domain than are obtained in the real domain.
(In the case of ``exp(x)``, the inversion via ``log`` is multi-valued
in the complex domain, having infinitely many branches.)
If you are working with real values only (or you are not sure which
function to use) you should probably set the domain to
``S.Reals`` (or use `invert\_real` which does that automatically).
Examples
========
>>> from sympy.solvers.solveset import invert_complex, invert_real
>>> from sympy.abc import x, y
>>> from sympy import exp, log
When does exp(x) == y?
>>> invert_complex(exp(x), y, x)
(x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers()))
>>> invert_real(exp(x), y, x)
(x, Intersection((-oo, oo), {log(y)}))
When does exp(x) == 1?
>>> invert_complex(exp(x), 1, x)
(x, ImageSet(Lambda(_n, 2*_n*I*pi), Integers()))
>>> invert_real(exp(x), 1, x)
(x, {0})
See Also
========
invert_real, invert_complex
"""
x = sympify(x)
if not x.is_Symbol:
raise ValueError("x must be a symbol")
f_x = sympify(f_x)
if not f_x.has(x):
raise ValueError("Inverse of constant function doesn't exist")
y = sympify(y)
if y.has(x):
raise ValueError("y should be independent of x ")
if domain.is_subset(S.Reals):
x, s = _invert_real(f_x, FiniteSet(y), x)
else:
x, s = _invert_complex(f_x, FiniteSet(y), x)
return x, s.intersection(domain) if isinstance(s, FiniteSet) else s
invert_complex = _invert
def invert_real(f_x, y, x, domain=S.Reals):
return _invert(f_x, y, x, domain)
def _invert_real(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol:
return (f, g_ys)
n = Dummy('n', real=True)
if hasattr(f, 'inverse') and not isinstance(f, (
TrigonometricFunction,
HyperbolicFunction,
)):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_real(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys),
symbol)
if isinstance(f, Abs):
pos = Interval(0, S.Infinity)
neg = Interval(S.NegativeInfinity, 0)
return _invert_real(f.args[0],
Union(imageset(Lambda(n, n), g_ys).intersect(pos),
imageset(Lambda(n, -n), g_ys).intersect(neg)), symbol)
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_real(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
return _invert_real(h, imageset(Lambda(n, n/g), g_ys), symbol)
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if not expo_has_sym:
res = imageset(Lambda(n, real_root(n, expo)), g_ys)
if expo.is_rational:
numer, denom = expo.as_numer_denom()
if numer == S.One or numer == - S.One:
return _invert_real(base, res, symbol)
else:
if numer % 2 == 0:
n = Dummy('n')
neg_res = imageset(Lambda(n, -n), res)
return _invert_real(base, res + neg_res, symbol)
else:
return _invert_real(base, res, symbol)
else:
if not base.is_positive:
raise ValueError("x**w where w is irrational is not "
"defined for negative x")
return _invert_real(base, res, symbol)
if not base_has_sym:
return _invert_real(expo,
imageset(Lambda(n, log(n)/log(base)), g_ys), symbol)
if isinstance(f, TrigonometricFunction):
if isinstance(g_ys, FiniteSet):
def inv(trig):
if isinstance(f, (sin, csc)):
F = asin if isinstance(f, sin) else acsc
return (lambda a: n*pi + (-1)**n*F(a),)
if isinstance(f, (cos, sec)):
F = acos if isinstance(f, cos) else asec
return (
lambda a: 2*n*pi + F(a),
lambda a: 2*n*pi - F(a),)
if isinstance(f, (tan, cot)):
return (lambda a: n*pi + f.inverse()(a),)
n = Dummy('n', integer=True)
invs = S.EmptySet
for L in inv(f):
invs += Union(*[imageset(Lambda(n, L(g)), S.Integers) for g in g_ys])
return _invert_real(f.args[0], invs, symbol)
return (f, g_ys)
def _invert_complex(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol:
return (f, g_ys)
n = Dummy('n')
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_complex(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
return _invert_complex(h, imageset(Lambda(n, n/g), g_ys), symbol)
if hasattr(f, 'inverse') and \
not isinstance(f, TrigonometricFunction) and \
not isinstance(f, exp):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_complex(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)
if isinstance(f, exp):
if isinstance(g_ys, FiniteSet):
exp_invs = Union(*[imageset(Lambda(n, I*(2*n*pi + arg(g_y)) +
log(Abs(g_y))), S.Integers)
for g_y in g_ys if g_y != 0])
return _invert_complex(f.args[0], exp_invs, symbol)
return (f, g_ys)
def domain_check(f, symbol, p):
"""Returns False if point p is infinite or any subexpression of f
is infinite or becomes so after replacing symbol with p. If none of
these conditions is met then True will be returned.
Examples
========
>>> from sympy import Mul, oo
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import domain_check
>>> g = 1/(1 + (1/(x + 1))**2)
>>> domain_check(g, x, -1)
False
>>> domain_check(x**2, x, 0)
True
>>> domain_check(1/x, x, oo)
False
* The function relies on the assumption that the original form
of the equation has not been changed by automatic simplification.
>>> domain_check(x/x, x, 0) # x/x is automatically simplified to 1
True
* To deal with automatic evaluations use evaluate=False:
>>> domain_check(Mul(x, 1/x, evaluate=False), x, 0)
False
"""
f, p = sympify(f), sympify(p)
if p.is_infinite:
return False
return _domain_check(f, symbol, p)
def _domain_check(f, symbol, p):
# helper for domain check
if f.is_Atom and f.is_finite:
return True
elif f.subs(symbol, p).is_infinite:
return False
else:
return all([_domain_check(g, symbol, p)
for g in f.args])
def _is_finite_with_finite_vars(f, domain=S.Complexes):
"""
Return True if the given expression is finite. For symbols that
don't assign a value for `complex` and/or `real`, the domain will
be used to assign a value; symbols that don't assign a value
for `finite` will be made finite. All other assumptions are
left unmodified.
"""
def assumptions(s):
A = s.assumptions0
if A.get('finite', None) is None:
A['finite'] = True
A.setdefault('complex', True)
A.setdefault('real', domain.is_subset(S.Reals))
return A
reps = dict([(s, Dummy(**assumptions(s))) for s in f.free_symbols])
return f.xreplace(reps).is_finite
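# A minimal sketch of how the helper above behaves (assuming the usual
# sympy names; outputs are what the assumptions system is expected to
# report): a bare symbol is treated as finite, so a polynomial in it is
# finite, while an explicit infinity is not.
#
#     >>> from sympy import Symbol, oo
#     >>> x = Symbol('x')
#     >>> _is_finite_with_finite_vars(x + 1)
#     True
#     >>> _is_finite_with_finite_vars(oo)
#     False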
def _is_function_class_equation(func_class, f, symbol):
""" Tests whether the equation is an equation of the given function class.
The given equation belongs to the given function class if it is
comprised of functions of the function class which are multiplied by
or added to expressions independent of the symbol. In addition, the
arguments of all such functions must be linear in the symbol as well.
Examples
========
>>> from sympy.solvers.solveset import _is_function_class_equation
>>> from sympy import tan, sin, tanh, sinh, exp
>>> from sympy.abc import x
>>> from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
... HyperbolicFunction)
>>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)
True
>>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)
True
>>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)
True
"""
if f.is_Mul or f.is_Add:
return all(_is_function_class_equation(func_class, arg, symbol)
for arg in f.args)
if f.is_Pow:
if not f.exp.has(symbol):
return _is_function_class_equation(func_class, f.base, symbol)
else:
return False
if not f.has(symbol):
return True
if isinstance(f, func_class):
try:
g = Poly(f.args[0], symbol)
return g.degree() <= 1
except PolynomialError:
return False
else:
return False
def _solve_as_rational(f, symbol, domain):
""" solve rational functions"""
f = together(f, deep=True)
g, h = fraction(f)
if not h.has(symbol):
return _solve_as_poly(g, symbol, domain)
else:
valid_solns = _solveset(g, symbol, domain)
invalid_solns = _solveset(h, symbol, domain)
return valid_solns - invalid_solns
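# A minimal sketch of the effect of the helper above (assuming the usual
# sympy names): zeros of the numerator are kept and zeros of the
# denominator removed, so a point where both vanish is still excluded.
#
#     >>> from sympy import Symbol, S
#     >>> x = Symbol('x')
#     >>> solveset((x**2 - 1)/(x - 1), x, S.Reals)
#     {-1}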
def _solve_real_trig(f, symbol):
""" Helper to solve trigonometric equations """
f = trigsimp(f)
f_original = f
f = f.rewrite(exp)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(exp(I*symbol), y), h.subs(exp(I*symbol), y)
if g.has(symbol) or h.has(symbol):
return ConditionSet(symbol, Eq(f, 0), S.Reals)
solns = solveset_complex(g, y) - solveset_complex(h, y)
if isinstance(solns, FiniteSet):
return Union(*[invert_complex(exp(I*symbol), s, symbol)[1]
for s in solns])
elif solns is S.EmptySet:
return S.EmptySet
else:
return ConditionSet(symbol, Eq(f_original, 0), S.Reals)
def _solve_as_poly(f, symbol, domain=S.Complexes):
"""
Solve the equation using polynomial techniques if it already is a
polynomial equation or, with a change of variables, can be made so.
"""
result = None
if f.is_polynomial(symbol):
solns = roots(f, symbol, cubics=True, quartics=True,
quintics=True, domain='EX')
num_roots = sum(solns.values())
if degree(f, symbol) <= num_roots:
result = FiniteSet(*solns.keys())
else:
poly = Poly(f, symbol)
solns = poly.all_roots()
if poly.degree() <= len(solns):
result = FiniteSet(*solns)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
poly = Poly(f)
if poly is None:
result = ConditionSet(symbol, Eq(f, 0), domain)
gens = [g for g in poly.gens if g.has(symbol)]
if len(gens) == 1:
poly = Poly(poly, gens[0])
gen = poly.gen
deg = poly.degree()
poly = Poly(poly.as_expr(), poly.gen, composite=True)
poly_solns = FiniteSet(*roots(poly, cubics=True, quartics=True,
quintics=True).keys())
if len(poly_solns) < deg:
result = ConditionSet(symbol, Eq(f, 0), domain)
if gen != symbol:
y = Dummy('y')
inverter = invert_real if domain.is_subset(S.Reals) else invert_complex
lhs, rhs_s = inverter(gen, y, symbol)
if lhs == symbol:
result = Union(*[rhs_s.subs(y, s) for s in poly_solns])
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
if result is not None:
if isinstance(result, FiniteSet):
# this is to simplify solutions like -sqrt(-I) to sqrt(2)/2
# - sqrt(2)*I/2. We are not expanding for solution with free
# variables because that makes the solution more complicated. For
# example expand_complex(a) returns re(a) + I*im(a)
if all([s.free_symbols == set() and not isinstance(s, RootOf)
for s in result]):
s = Dummy('s')
result = imageset(Lambda(s, expand_complex(s)), result)
if isinstance(result, FiniteSet):
result = result.intersection(domain)
return result
else:
return ConditionSet(symbol, Eq(f, 0), domain)
def _has_rational_power(expr, symbol):
"""
Returns (bool, den) where bool is True if the term has a
non-integer rational power and den is the denominator of the
expression's exponent.
Examples
========
>>> from sympy.solvers.solveset import _has_rational_power
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> _has_rational_power(sqrt(x), x)
(True, 2)
>>> _has_rational_power(x**2, x)
(False, 1)
"""
a, p, q = Wild('a'), Wild('p'), Wild('q')
pattern_match = expr.match(a*p**q) or {}
if pattern_match.get(a, S.Zero) is S.Zero:
return (False, S.One)
elif p not in pattern_match.keys():
return (False, S.One)
elif isinstance(pattern_match[q], Rational) \
and pattern_match[p].has(symbol):
if not pattern_match[q].q == S.One:
return (True, pattern_match[q].q)
if not isinstance(pattern_match[a], Pow) \
or isinstance(pattern_match[a], Mul):
return (False, S.One)
else:
return _has_rational_power(pattern_match[a], symbol)
def _solve_radical(f, symbol, solveset_solver):
""" Helper function to solve equations with radicals """
eq, cov = unrad(f)
if not cov:
result = solveset_solver(eq, symbol) - \
Union(*[solveset_solver(g, symbol) for g in denoms(f, [symbol])])
else:
y, yeq = cov
if not solveset_solver(y - I, y):
yreal = Dummy('yreal', real=True)
yeq = yeq.xreplace({y: yreal})
eq = eq.xreplace({y: yreal})
y = yreal
g_y_s = solveset_solver(yeq, symbol)
f_y_sols = solveset_solver(eq, y)
result = Union(*[imageset(Lambda(y, g_y), f_y_sols)
for g_y in g_y_s])
return FiniteSet(*[s for s in result if checksol(f, symbol, s) is True])
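# A minimal sketch of the helper above in action (assuming the usual sympy
# names): ``unrad`` removes the radical by squaring, and ``checksol``
# discards the spurious root introduced by that squaring.
#
#     >>> from sympy import Symbol, sqrt, S
#     >>> x = Symbol('x')
#     >>> solveset(sqrt(x) + x - 6, x, S.Reals)
#     {4}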
def _solve_abs(f, symbol, domain):
""" Helper function to solve equation involving absolute value function """
if not domain.is_subset(S.Reals):
raise ValueError(filldedent('''
Absolute values cannot be inverted in the
complex domain.'''))
p, q, r = Wild('p'), Wild('q'), Wild('r')
pattern_match = f.match(p*Abs(q) + r) or {}
if not pattern_match.get(p, S.Zero).is_zero:
f_p, f_q, f_r = pattern_match[p], pattern_match[q], pattern_match[r]
q_pos_cond = solve_univariate_inequality(f_q >= 0, symbol,
relational=False)
q_neg_cond = solve_univariate_inequality(f_q < 0, symbol,
relational=False)
sols_q_pos = solveset_real(f_p*f_q + f_r,
symbol).intersect(q_pos_cond)
sols_q_neg = solveset_real(f_p*(-f_q) + f_r,
symbol).intersect(q_neg_cond)
return Union(sols_q_pos, sols_q_neg)
else:
return ConditionSet(symbol, Eq(f, 0), domain)
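# A minimal sketch of the helper above (assuming the usual sympy names):
# the absolute value is split into its two sign branches and each branch's
# solutions are intersected with the matching condition on the argument.
#
#     >>> from sympy import Abs, Symbol, S
#     >>> x = Symbol('x')
#     >>> solveset(Abs(x) + x - 2, x, S.Reals)
#     {1}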
def _solveset(f, symbol, domain, _check=False):
"""Helper for solveset to return a result from an expression
that has already been sympify'ed and is known to contain the
given symbol."""
# _check controls whether the answer is checked or not
from sympy.simplify.simplify import signsimp
orig_f = f
f = together(f)
if f.is_Mul:
_, f = f.as_independent(symbol, as_Add=False)
if f.is_Add:
a, h = f.as_independent(symbol)
m, h = h.as_independent(symbol, as_Add=False)
f = a/m + h # XXX condition `m != 0` should be added to soln
f = piecewise_fold(f)
# assign the solvers to use
solver = lambda f, x, domain=domain: _solveset(f, x, domain)
if domain.is_subset(S.Reals):
inverter_func = invert_real
else:
inverter_func = invert_complex
inverter = lambda f, rhs, symbol: inverter_func(f, rhs, symbol, domain)
result = EmptySet()
if f.expand().is_zero:
return domain
elif not f.has(symbol):
return EmptySet()
elif f.is_Mul and all(_is_finite_with_finite_vars(m, domain)
for m in f.args):
# If f(x) and g(x) are both finite, the solution of f(x)*g(x) == 0 is
# the Union of f(x) == 0 and g(x) == 0. This is not true in general:
# g(x) can grow infinitely large at the values where f(x) == 0. To be
# sure that we are not silently allowing any wrong solutions we use
# this technique only if both f and g are finite for a finite input.
result = Union(*[solver(m, symbol) for m in f.args])
elif _is_function_class_equation(TrigonometricFunction, f, symbol) or \
_is_function_class_equation(HyperbolicFunction, f, symbol):
result = _solve_real_trig(f, symbol)
elif f.is_Piecewise:
dom = domain
result = EmptySet()
expr_set_pairs = f.as_expr_set_pairs()
for (expr, in_set) in expr_set_pairs:
if in_set.is_Relational:
in_set = in_set.as_set()
if in_set.is_Interval:
dom -= in_set
solns = solver(expr, symbol, in_set)
result += solns
else:
lhs, rhs_s = inverter(f, 0, symbol)
if lhs == symbol:
# do some very minimal simplification since
# repeated inversion may have left the result
# in a state that other solvers (e.g. poly)
# would have simplified; this is done here
# rather than in the inverter since here it
# is only done once whereas there it would
# be repeated for each step of the inversion
if isinstance(rhs_s, FiniteSet):
rhs_s = FiniteSet(*[Mul(*
signsimp(i).as_content_primitive())
for i in rhs_s])
result = rhs_s
elif isinstance(rhs_s, FiniteSet):
for equation in [lhs - rhs for rhs in rhs_s]:
if equation == f:
if any(_has_rational_power(g, symbol)[0]
for g in equation.args) or _has_rational_power(
equation, symbol)[0]:
result += _solve_radical(equation,
symbol,
solver)
elif equation.has(Abs):
result += _solve_abs(f, symbol, domain)
else:
result += _solve_as_rational(equation, symbol, domain)
else:
result += solver(equation, symbol)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
if _check:
if isinstance(result, ConditionSet):
# it wasn't solved or has enumerated all conditions
# -- leave it alone
return result
# whittle away all but the symbol-containing core
# to use this for testing
fx = orig_f.as_independent(symbol, as_Add=True)[1]
fx = fx.as_independent(symbol, as_Add=False)[1]
if isinstance(result, FiniteSet):
# check the result for invalid solutions
result = FiniteSet(*[s for s in result
if isinstance(s, RootOf)
or domain_check(fx, symbol, s)])
return result
def solveset(f, symbol=None, domain=S.Complexes):
"""Solves a given inequality or equation with set as output
Parameters
==========
f : Expr or a relational.
The target equation or inequality
symbol : Symbol
The variable for which the equation is solved
domain : Set
The domain over which the equation is solved
Returns
=======
Set
A set of values for `symbol` for which `f` is True or is equal to
zero. An `EmptySet` is returned if `f` is False or nonzero.
A `ConditionSet` is returned as an unsolved object if the algorithms
to evaluate the complete solution are not yet implemented.
`solveset` claims to be complete in the solution set that it returns.
Raises
======
NotImplementedError
The algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
Notes
=====
Python interprets 0 and 1 as False and True, respectively, but
in this function they refer to solutions of an expression. So 0 and 1
return the Domain and EmptySet, respectively, while True and False
return the opposite (as they are assumed to be solutions of relational
expressions).
See Also
========
solveset_real: solver for real domain
solveset_complex: solver for complex domain
Examples
========
>>> from sympy import exp, sin, Symbol, pprint, S
>>> from sympy.solvers.solveset import solveset, solveset_real
* The default domain is complex. Not specifying a domain will lead
to the solving of the equation in the complex domain (and this
is not affected by the assumptions on the symbol):
>>> x = Symbol('x')
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers()}
>>> x = Symbol('x', real=True)
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers()}
* If you want to use `solveset` to solve the equation in the
real domain, provide a real domain. (Using ``solveset_real``
does this automatically.)
>>> R = S.Reals
>>> x = Symbol('x')
>>> solveset(exp(x) - 1, x, R)
{0}
>>> solveset_real(exp(x) - 1, x)
{0}
The solution is mostly unaffected by assumptions on the symbol,
but there may be some slight difference:
>>> pprint(solveset(sin(x)/x,x), use_unicode=False)
({2*n*pi | n in Integers()} \ {0}) U ({2*n*pi + pi | n in Integers()} \ {0})
>>> p = Symbol('p', positive=True)
>>> pprint(solveset(sin(p)/p, p), use_unicode=False)
{2*n*pi | n in Integers()} U {2*n*pi + pi | n in Integers()}
* Inequalities can be solved over the real domain only. Use of a complex
domain leads to a NotImplementedError.
>>> solveset(exp(x) > 1, x, R)
(0, oo)
"""
f = sympify(f)
if f is S.true:
return domain
if f is S.false:
return S.EmptySet
if not isinstance(f, (Expr, Number)):
raise ValueError("%s is not a valid SymPy expression" % (f))
free_symbols = f.free_symbols
if not free_symbols:
b = Eq(f, 0)
if b is S.true:
return domain
elif b is S.false:
return S.EmptySet
else:
raise NotImplementedError(filldedent('''
relationship between value and 0 is unknown: %s''' % b))
if symbol is None:
if len(free_symbols) == 1:
symbol = free_symbols.pop()
else:
raise ValueError(filldedent('''
The independent variable must be specified for a
multivariate equation.'''))
elif not getattr(symbol, 'is_Symbol', False):
raise ValueError('A Symbol must be given, not type %s: %s' %
(type(symbol), symbol))
if isinstance(f, Eq):
from sympy.core import Add
f = Add(f.lhs, - f.rhs, evaluate=False)
elif f.is_Relational:
if not domain.is_subset(S.Reals):
raise NotImplementedError(filldedent('''
Inequalities in the complex domain are
not supported. Try the real domain by
setting domain=S.Reals'''))
try:
result = solve_univariate_inequality(
f, symbol, relational=False) - _invalid_solutions(
f, symbol, domain)
except NotImplementedError:
result = ConditionSet(symbol, f, domain)
return result
return _solveset(f, symbol, domain, _check=True)
def _invalid_solutions(f, symbol, domain):
bad = S.EmptySet
for d in denoms(f):
bad += _solveset(d, symbol, domain, _check=False)
return bad
def solveset_real(f, symbol):
return solveset(f, symbol, S.Reals)
def solveset_complex(f, symbol):
return solveset(f, symbol, S.Complexes)
###############################################################################
################################ LINSOLVE #####################################
###############################################################################
def linear_eq_to_matrix(equations, *symbols):
r"""
Converts a given System of Equations into Matrix form.
Here `equations` must be a linear system of equations in
`symbols`. The order of symbols in input `symbols` will
determine the order of coefficients in the returned
Matrix.
The Matrix form corresponds to the augmented matrix form.
For example:
.. math:: 4x + 2y + 3z = 1
.. math:: 3x + y + z = -6
.. math:: 2x + 4y + 9z = 2
This system would return `A` & `b` as given below:
::
[ 4 2 3 ] [ 1 ]
A = [ 3 1 1 ] b = [-6 ]
[ 2 4 9 ] [ 2 ]
Examples
========
>>> from sympy import linear_eq_to_matrix, symbols
>>> x, y, z = symbols('x, y, z')
>>> eqns = [x + 2*y + 3*z - 1, 3*x + y + z + 6, 2*x + 4*y + 9*z - 2]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[1, 2, 3],
[3, 1, 1],
[2, 4, 9]])
>>> b
Matrix([
[ 1],
[-6],
[ 2]])
>>> eqns = [x + z - 1, y + z, x - y]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[1, 0, 1],
[0, 1, 1],
[1, -1, 0]])
>>> b
Matrix([
[1],
[0],
[0]])
* Symbolic coefficients are also supported
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> A, B = linear_eq_to_matrix(eqns, x, y)
>>> A
Matrix([
[a, b],
[d, e]])
>>> B
Matrix([
[c],
[f]])
"""
if not symbols:
raise ValueError('Symbols must be given, for which coefficients \
are to be found.')
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
M = Matrix([symbols])
# initialise Matrix with symbols + 1 columns
M = M.col_insert(len(symbols), Matrix([1]))
row_no = 1
for equation in equations:
f = sympify(equation)
if isinstance(f, Equality):
f = f.lhs - f.rhs
# Extract coeff of symbols
coeff_list = []
for symbol in symbols:
coeff_list.append(f.coeff(symbol))
# append constant term (term free from symbols)
coeff_list.append(-f.as_coeff_add(*symbols)[0])
# insert equations coeff's into rows
M = M.row_insert(row_no, Matrix([coeff_list]))
row_no += 1
# delete the initialised (1st) trivial row
M.row_del(0)
A, b = M[:, :-1], M[:, -1:]
return A, b
def linsolve(system, *symbols):
r"""
Solve a system of N linear equations with M variables, which
means both under- and overdetermined systems are supported.
The possible number of solutions is zero, one or infinite.
Zero solutions means the system is inconsistent, in which case
an EmptySet is returned, whereas infinite solutions are
represented parametrically in terms of the given symbols. For
a unique solution a FiniteSet of an ordered tuple is returned.
All standard input formats are supported.
For the given set of equations, the respective input types
are given below:
.. math:: 3x + 2y - z = 1
.. math:: 2x - 2y + 4z = -2
.. math:: 2x - y + 2z = 0
* Augmented Matrix Form, `system` given below:
::
[3 2 -1 1]
system = [2 -2 4 -2]
[2 -1 2 0]
* List Of Equations Form
`system = [3x + 2y - z - 1, 2x - 2y + 4z + 2, 2x - y + 2z]`
* Input A & b Matrix Form (from Ax = b) are given as below:
::
[3 2 -1 ] [ 1 ]
A = [2 -2 4 ] b = [ -2 ]
[2 -1 2 ] [ 0 ]
`system = (A, b)`
Symbols to solve for should be given as input in all the
cases, either as an iterable or as comma-separated arguments.
This is done to maintain consistency in returning solutions
in the same order as the variables given by the user.
The algorithm used here is Gauss-Jordan elimination, which
results, after elimination, in a row echelon form matrix.
Returns
=======
A FiniteSet containing an ordered tuple of values of `symbols` for which
the `system` has a solution.
Please note that a general FiniteSet is unordered; the solution
returned here is not simply a FiniteSet of solutions, but rather
a FiniteSet containing one ordered tuple, i.e. the first and only
argument to FiniteSet is a tuple of solutions, which is ordered,
and hence the returned solution is ordered.
Also note that the solution could have been returned as a bare
ordered tuple; FiniteSet is just a wrapper `{}` around
the tuple. It has no other significance except that it is
used to maintain a consistent output format throughout
solveset.
Returns EmptySet() if the linear system is inconsistent.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
Examples
========
>>> from sympy import Matrix, S, linsolve, symbols
>>> x, y, z = symbols("x, y, z")
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> A
Matrix([
[1, 2, 3],
[4, 5, 6],
[7, 8, 10]])
>>> b
Matrix([
[3],
[6],
[9]])
>>> linsolve((A, b), [x, y, z])
{(-1, 2, 0)}
* Parametric Solution: In case the system is underdetermined, the function
will return a parametric solution in terms of the given symbols.
Free symbols in the system are returned as they are. For example, in the
system below, `z` is returned as the solution for variable z, which means
z is a free symbol, i.e. it can take arbitrary values.
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = Matrix([3, 6, 9])
>>> linsolve((A, b), [x, y, z])
{(z - 1, -2*z + 2, z)}
* List of Equations as input
>>> Eqns = [3*x + 2*y - z - 1, 2*x - 2*y + 4*z + 2, - x + S(1)/2*y - z]
>>> linsolve(Eqns, x, y, z)
{(1, -2, -2)}
* Augmented Matrix as input
>>> aug = Matrix([[2, 1, 3, 1], [2, 6, 8, 3], [6, 8, 18, 5]])
>>> aug
Matrix([
[2, 1, 3, 1],
[2, 6, 8, 3],
[6, 8, 18, 5]])
>>> linsolve(aug, x, y, z)
{(3/10, 2/5, 0)}
* Solve for symbolic coefficients
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> linsolve(eqns, x, y)
{((-b*f + c*e)/(a*e - b*d), (a*f - c*d)/(a*e - b*d))}
* A degenerate system returns the solution as a set of the given
symbols.
>>> system = Matrix(([0,0,0], [0,0,0], [0,0,0]))
>>> linsolve(system, x, y)
{(x, y)}
* For an empty system linsolve returns an empty set
>>> linsolve([ ], x)
EmptySet()
"""
if not system:
return S.EmptySet
if not symbols:
raise ValueError('Symbols must be given, for which solution of the '
'system is to be found.')
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
try:
sym = symbols[0].is_Symbol
except AttributeError:
sym = False
if not sym:
raise ValueError('Symbols or iterable of symbols must be given as '
'second argument, not type %s: %s' % (type(symbols[0]), symbols[0]))
# 1). Augmented Matrix input Form
if isinstance(system, Matrix):
A, b = system[:, :-1], system[:, -1:]
elif hasattr(system, '__iter__'):
# 2). A & b as input Form
if len(system) == 2 and system[0].is_Matrix:
A, b = system[0], system[1]
# 3). List of equations Form
if not system[0].is_Matrix:
A, b = linear_eq_to_matrix(system, symbols)
else:
raise ValueError("Invalid arguments")
# Solve using Gauss-Jordan elimination
try:
sol, params, free_syms = A.gauss_jordan_solve(b, freevar=True)
except ValueError:
# No solution
return EmptySet()
# Replace free parameters with free symbols
solution = []
if params:
for s in sol:
for k, v in enumerate(params):
s = s.xreplace({v: symbols[free_syms[k]]})
solution.append(simplify(s))
else:
for s in sol:
solution.append(simplify(s))
# Return solutions
solution = FiniteSet(tuple(solution))
return solution
|
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2011 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Implementation of TiledGrid3DAction actions
'''
__docformat__ = 'restructuredtext'
import random
from cocos.euclid import *
from basegrid_actions import *
from cocos.director import director
rr = random.randrange
__all__ = [ 'FadeOutTRTiles', # actions that don't modify the z coordinate
'FadeOutBLTiles',
'FadeOutUpTiles',
'FadeOutDownTiles',
'ShuffleTiles',
'TurnOffTiles',
'SplitRows',
'SplitCols',
'ShakyTiles3D', # actions that modify the z coordinate
'ShatteredTiles3D',
'WavesTiles3D',
'JumpTiles3D',
]
# Don't export this class
class Tile(object):
def __init__(self, position=(0,0), start_position=(0,0), delta=(0,0) ):
super(Tile,self).__init__()
self.position = position
self.start_position = start_position
self.delta = delta
def __repr__(self):
return "(start_pos: %s pos: %s delta:%s)" % (self.start_position, self.position, self.delta)
class ShakyTiles3D( TiledGrid3DAction ):
'''Simulates a shaky floor composed of tiles
Example::
scene.do( ShakyTiles3D( randrange=6, grid=(4,4), duration=10) )
'''
def init( self, randrange=6, *args, **kw ):
'''
:Parameters:
`randrange` : int
Number that will be used in random.randrange( -randrange, randrange) to do the effect
'''
super(ShakyTiles3D,self).init(*args,**kw)
self.randrange = randrange
def update( self, t ):
for i in xrange(0, self.grid.x):
for j in xrange(0, self.grid.y):
coords = self.get_original_tile(i,j)
for k in xrange(0,len(coords),3):
x = rr(-self.randrange, self.randrange+1)
y = rr(-self.randrange, self.randrange+1)
z = rr(-self.randrange, self.randrange+1)
coords[k] += x
coords[k+1] += y
coords[k+2] += z
self.set_tile(i,j,coords)
class ShatteredTiles3D( TiledGrid3DAction ):
'''ShatteredTiles3D shatters the tiles according to a random value.
It is similar to `ShakyTiles3D`, but the tiles are shaken for just one frame and
then keep that state for the rest of the duration.
Example::
scene.do( ShatteredTiles3D( randrange=12 ) )
'''
def init( self, randrange=6, *args, **kw ):
'''
:Parameters:
`randrange` : int
Number that will be used in random.randrange( -randrange, randrange) to do the effect
'''
super(ShatteredTiles3D,self).init(*args,**kw)
self.randrange = randrange
self._once = False
def update( self, t ):
if not self._once:
for i in xrange(0, self.grid.x):
for j in xrange(0, self.grid.y):
coords = self.get_original_tile(i,j)
for k in xrange(0,len(coords),3):
x = rr(-self.randrange, self.randrange+1)
y = rr(-self.randrange, self.randrange+1)
z = rr(-self.randrange, self.randrange+1)
coords[k] += x
coords[k+1] += y
coords[k+2] += z
self.set_tile(i,j,coords)
self._once = True
class ShuffleTiles( TiledGrid3DAction ):
'''ShuffleTiles moves the tiles randomly across the screen.
To put them back use: Reverse( ShuffleTiles() ) with the same seed parameter.
Example::
scene.do( ShuffleTiles( grid=(4,4), seed=1, duration=10) )
'''
def init(self, seed=-1, *args, **kw):
'''
:Parameters:
`seed` : float
Seed for the random in the shuffle.
'''
super(ShuffleTiles,self).init(*args, **kw)
self.seed = seed
def start(self):
super(ShuffleTiles,self).start()
self.tiles = {}
self._once = False
if self.seed != -1:
random.seed( self.seed )
# random positions
self.nr_of_tiles = self.grid.x * self.grid.y
self.tiles_order = range(self.nr_of_tiles )
random.shuffle( self.tiles_order )
for i in xrange(self.grid.x):
for j in xrange(self.grid.y):
self.tiles[(i,j)] = Tile( position = Point2(i,j),
start_position = Point2(i,j),
delta= self._get_delta(i,j) )
def place_tile(self, i, j):
t = self.tiles[(i,j)]
coords = self.get_original_tile(i,j)
for k in xrange(0,len(coords),3):
coords[k] += int( t.position.x * self.target.grid.x_step )
coords[k+1] += int( t.position.y * self.target.grid.y_step )
self.set_tile(i,j,coords)
def update(self, t ):
for i in xrange(0, self.grid.x):
for j in xrange(0, self.grid.y):
self.tiles[(i,j)].position = self.tiles[(i,j)].delta * t
self.place_tile(i,j)
# private method
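# _get_delta flattens tile (x, y) to the linear index x*grid.y + y, looks it
# up in the shuffled tiles_order list, unflattens the result with divmod into
# a destination cell (i, j), and returns the displacement from the original
# cell to that destination.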
def _get_delta(self, x, y):
idx = x * self.grid.y + y
i,j = divmod( self.tiles_order[idx], self.grid.y )
return Point2(i,j)-Point2(x,y)
class FadeOutTRTiles( TiledGrid3DAction ):
'''Fades out each tile following a diagonal Top-Right path until all the tiles are faded out.
Example::
scene.do( FadeOutTRTiles( grid=(16,12), duration=10) )
'''
def update( self, t ):
# direction right - up
for i in xrange(self.grid.x):
for j in xrange(self.grid.y):
distance = self.test_func(i,j,t)
if distance == 0:
self.turn_off_tile(i,j)
elif distance < 1:
self.transform_tile(i,j,distance)
else:
self.turn_on_tile(i,j)
def turn_on_tile(self, x,y):
self.set_tile(x,y, self.get_original_tile(x,y) )
def transform_tile(self, x, y, t ):
coords = self.get_original_tile(x,y)
for c in xrange( len(coords) ):
# x
if c == 0*3 or c == 3*3:
coords[c] = coords[c] + (self.target.grid.x_step / 2.0) * (1-t)
elif c == 1*3 or c == 2*3:
coords[c] = coords[c] - (self.target.grid.x_step / 2.0) * (1-t)
# y
if c == 0*3+1 or c == 1*3+1:
coords[c] = coords[c] + (self.target.grid.y_step / 2.0) * (1-t)
elif c == 2*3+1 or c == 3*3+1:
coords[c] = coords[c] - (self.target.grid.y_step / 2.0) * (1-t)
self.set_tile(x,y,coords)
def turn_off_tile( self,x,y):
self.set_tile(x,y,[0,0,0,0,0,0,0,0,0,0,0,0] )
def test_func(self, i,j, t ):
x,y = self.grid * t
if x+y==0:
return 1
return pow( (i+j) / float(x+y), 6 )
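# test_func above computes ((i+j)/(x+y))**6 with (x, y) = grid * t, a steep
# ratio of the tile's diagonal index to the advancing fade front; update()
# leaves the tile intact while the value is >= 1, shrinks it via
# transform_tile while it is between 0 and 1, and turns it off at 0.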
class FadeOutBLTiles( FadeOutTRTiles):
'''Fades out each tile following a diagonal Bottom-Left path until all the tiles are faded out.
Example::
scene.do( FadeOutBLTiles( grid=(16,12), duration=5) )
'''
def test_func(self, i,j,t):
x,y = self.grid * (1-t)
if i+j==0:
return 1
return pow( (x+y) / float(i+j), 6)
class FadeOutUpTiles( FadeOutTRTiles):
'''Fades out each tile following an upwards path until all the tiles are faded out.
Example::
scene.do( FadeOutUpTiles( grid=(16,12), duration=5) )
'''
def test_func(self, i,j, t):
x,y = self.grid * t
if y==0:
return 1
return pow( (j) / float(y), 6 )
def transform_tile(self, x, y, t ):
coords = self.get_original_tile(x,y)
for c in xrange( len(coords) ):
# y
if c == 0*3+1 or c == 1*3+1:
coords[c] = coords[c] + (self.target.grid.y_step / 2.0) * (1-t)
elif c == 2*3+1 or c == 3*3+1:
coords[c] = coords[c] - (self.target.grid.y_step / 2.0) * (1-t)
self.set_tile(x,y,coords)
class FadeOutDownTiles( FadeOutUpTiles):
'''Fades out each tile following a downwards path until all the tiles are faded out.
Example::
scene.do( FadeOutDownTiles( grid=(16,12), duration=5) )
'''
def test_func(self, i,j, t):
x,y = self.grid * (1-t)
if j==0:
return 1
return pow( (y) / float(j), 6 )
class TurnOffTiles( TiledGrid3DAction ):
'''TurnOffTiles turns off each tile in random order
Example::
scene.do( TurnOffTiles( grid=(16,12), seed=1, duration=10) )
'''
def init(self, seed=-1, *args, **kw):
super(TurnOffTiles,self).init( *args, **kw )
self.seed = seed
def start(self):
super(TurnOffTiles,self).start()
if self.seed != -1:
random.seed( self.seed )
self.nr_of_tiles = self.grid.x * self.grid.y
self.tiles_order = range(self.nr_of_tiles )
random.shuffle( self.tiles_order )
def update( self, t ):
l = int( t * self.nr_of_tiles )
for i in xrange( self.nr_of_tiles):
t = self.tiles_order[i]
if i < l:
self.turn_off_tile(t)
else:
self.turn_on_tile(t)
def get_tile_pos(self, idx):
return divmod(idx, self.grid.y)
def turn_on_tile(self, t):
x,y = self.get_tile_pos(t)
self.set_tile(x,y, self.get_original_tile(x,y) )
def turn_off_tile(self,t):
x,y = self.get_tile_pos(t)
self.set_tile(x,y,[0,0,0,0,0,0,0,0,0,0,0,0] )
class WavesTiles3D( TiledGrid3DAction ):
'''Simulates waves using the math.sin() function in the z-axis of each tile
Example::
scene.do( WavesTiles3D( waves=5, amplitude=120, grid=(16,16), duration=10) )
'''
def init( self, waves=4, amplitude=120, *args, **kw ):
'''
:Parameters:
`waves` : int
Number of waves (2 * pi) that the action will perform. Default is 4
`amplitude` : int
Wave amplitude (height). Default is 120
'''
super(WavesTiles3D, self).init( *args, **kw )
#: Total number of waves to perform
self.waves=waves
#: amplitude rate. Default: 1.0
#: This value is modified by other actions like `AccelAmplitude`.
self.amplitude_rate = 1.0
self.amplitude=amplitude
def update( self, t ):
for i in xrange(0, self.grid.x):
for j in xrange(0, self.grid.y):
coords = self.get_original_tile(i,j)
x = coords[0]
y = coords[1]
z = (math.sin(t*math.pi*self.waves*2 + (y+x) * .01) * self.amplitude * self.amplitude_rate )
for k in xrange( 0,len(coords),3 ):
coords[k+2] += z
self.set_tile( i,j, coords )
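# The z offset above is sin(2*pi*waves*t + 0.01*(x + y)) scaled by
# amplitude*amplitude_rate: the time term sweeps `waves` full periods over
# the action's duration, while the small (x + y) phase offset makes
# neighbouring tiles ripple slightly out of step.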
class JumpTiles3D( TiledGrid3DAction ):
'''Even tiles will perform a jump in the z-axis using the sine function,
while the odd tiles will perform a jump using the sine+pi function
Example::
scene.do( JumpTiles3D( jumps=5, amplitude=40, grid=(16,16), duration=10) )
'''
def init( self, jumps=4, amplitude=20, *args, **kw ):
'''
:Parameters:
`jumps` : int
Number of jumps(2 * pi) that the action will perform. Default is 4
`amplitude` : int
Wave amplitude (height). Default is 20
'''
super(JumpTiles3D, self).init( *args, **kw )
#: Total number of jumps to perform
self.jumps=jumps
#: amplitude rate. Default: 1.0
#: This value is modified by other actions like `AccelAmplitude`.
self.amplitude_rate = 1.0
self.amplitude=amplitude
def update( self, t ):
sinz = (math.sin(t*math.pi*self.jumps*2 + (0) * .01) * self.amplitude * self.amplitude_rate )
sinz2= (math.sin(math.pi+t*math.pi*self.jumps*2 + (0) * .01) * self.amplitude * self.amplitude_rate )
for i in xrange(0, self.grid.x):
for j in xrange(0, self.grid.y):
coords = self.get_original_tile(i,j)
for k in xrange( 0,len(coords),3 ):
if (i+j) % 2 == 0:
coords[k+2] += sinz
else:
coords[k+2] += sinz2
self.set_tile( i,j, coords )
class SplitRows( TiledGrid3DAction ):
'''Split the screen into a number of rows, and move
these rows away from the screen.
The even rows are moved to the left, while the odd rows are moved to
the right.
Example::
scene.do( SplitRows( rows=3, duration=2) )
'''
def init( self, rows=9, grid=(-1,-1), *args, **kw ):
'''
:Parameters:
`rows` : int
Number of rows that will have the effect. Default: 9
'''
if grid != (-1,-1):
raise Exception("This action doesn't receives the grid argument")
grid = (1,rows)
self.rows = rows
super(SplitRows, self).init( grid, *args, **kw )
def update( self, t ):
x,y = director.get_window_size()
for j in xrange(0, self.grid.y):
coords = self.get_original_tile(0,j)
for c in xrange(0, len(coords), 3):
direction = 1
if j % 2 == 0:
direction = -1
coords[c] += direction * x * t
self.set_tile( 0,j, coords )
class SplitCols( TiledGrid3DAction ):
'''Split the screen into a number of columns, and move
these columns away from the screen.
The odd columns are moved upwards, while the even
columns are moved downwards.
Example::
scene.do( SplitCols( cols=3, duration=2) )
'''
def init( self, cols=9, grid=(-1,-1), *args, **kw ):
'''
:Parameters:
`cols` : int
Number of columns that will have the effect. Default: 9
'''
if grid != (-1,-1):
raise Exception("This action doesn't receives the grid argument")
grid = (cols,1)
self.cols = cols
super(SplitCols, self).init( grid, *args, **kw )
def update( self, t ):
x,y = director.get_window_size()
for i in xrange(0, self.grid.x):
coords = self.get_original_tile(i,0)
for c in xrange(0, len(coords), 3):
direction = 1
if i % 2 == 0:
direction = -1
coords[c+1] += direction * y * t
self.set_tile( i,0, coords )
|
|
# encoding: utf-8
"""
USAGE:
twitter [action] [options]
ACTIONS:
authorize authorize the command-line tool to interact with Twitter
follow follow a user
friends get latest tweets from your friends (default action)
help print this help text that you are currently reading
leave stop following a user
list get list of a user's lists; give a list name to get
tweets from that list
mylist get list of your lists; give a list name to get tweets
from that list
pyprompt start a Python prompt for interacting with the twitter
object directly
replies get latest replies to you
search search twitter (Beware: escape the octothorpe (#) in your shell)
set set your twitter status
shell login to the twitter shell
rate get your current rate limit status (remaining API reqs)
OPTIONS:
-r --refresh run this command forever, polling every once
in a while (default: every 10 minutes)
-R --refresh-rate <rate> set the refresh rate (in seconds)
-f --format <format> specify the output format for status updates
-c --config <filename> read username and password from given config
file (default ~/.twitter)
-l --length <count> specify number of status updates shown
(default: 20, max: 200)
-t --timestamp show time before status lines
-d --datestamp show date before status lines
--no-ssl use less-secure HTTP instead of HTTPS
--oauth <filename> filename to read/store oauth credentials to
FORMATS for the --format option
default one line per status
verbose multiple lines per status, more verbose status info
urls nothing but URLs
ansi ansi colour (rainbow mode)
CONFIG FILES
The config file should be placed in your home directory and be named .twitter.
It must contain a [twitter] header, and all the desired options you wish to
set, like so:
[twitter]
format: <desired_default_format_for_output>
prompt: <twitter_shell_prompt e.g. '[cyan]twitter[R]> '>
OAuth authentication tokens are stored in the file .twitter_oauth in your
home directory.
"""
from __future__ import print_function
try:
input = __builtins__['raw_input']
except (AttributeError, KeyError):
pass
CONSUMER_KEY = 'uS6hO2sV6tDKIOeVjhnFnQ'
CONSUMER_SECRET = 'MEYTOS97VvlHX7K1rwHPEqVpTSqZ71HtvoK4sVuYk'
import sys
import time
from getopt import gnu_getopt as getopt, GetoptError
from getpass import getpass
import re
import os.path
import locale
import string
try:
from ConfigParser import SafeConfigParser
except ImportError:
from configparser import ConfigParser as SafeConfigParser
import datetime
try:
from urllib.parse import quote
except ImportError:
from urllib2 import quote
try:
import HTMLParser
except ImportError:
import html.parser as HTMLParser
import webbrowser
from .api import Twitter, TwitterError
from .oauth import OAuth, write_token_file, read_token_file
from .oauth_dance import oauth_dance
from . import ansi
from .util import smrt_input, printNicely
OPTIONS = {
'action': 'friends',
'refresh': False,
'refresh_rate': 600,
'format': 'default',
'prompt': '[cyan]twitter[R]> ',
'config_filename': os.environ.get('HOME', os.environ.get('USERPROFILE', '')) + os.sep + '.twitter',
'oauth_filename': os.environ.get('HOME', os.environ.get('USERPROFILE', '')) + os.sep + '.twitter_oauth',
'length': 20,
'timestamp': False,
'datestamp': False,
'extra_args': [],
'secure': True,
'invert_split': False,
'force-ansi': False,
}
gHtmlParser = HTMLParser.HTMLParser()
hashtagRe = re.compile(r'(?P<hashtag>#\S+)')
profileRe = re.compile(r'(?P<profile>\@\S+)')
ansiFormatter = ansi.AnsiCmd(False)
def parse_args(args, options):
long_opts = ['help', 'format=', 'refresh', 'oauth=',
'refresh-rate=', 'config=', 'length=', 'timestamp',
'datestamp', 'no-ssl', 'force-ansi']
short_opts = "e:p:f:h?rR:c:l:td"
opts, extra_args = getopt(args, short_opts, long_opts)
if extra_args and hasattr(extra_args[0], 'decode'):
extra_args = [arg.decode(locale.getpreferredencoding())
for arg in extra_args]
for opt, arg in opts:
if opt in ('-f', '--format'):
options['format'] = arg
elif opt in ('-r', '--refresh'):
options['refresh'] = True
elif opt in ('-R', '--refresh-rate'):
options['refresh_rate'] = int(arg)
elif opt in ('-l', '--length'):
options["length"] = int(arg)
elif opt in ('-t', '--timestamp'):
options["timestamp"] = True
elif opt in ('-d', '--datestamp'):
options["datestamp"] = True
elif opt in ('-?', '-h', '--help'):
options['action'] = 'help'
elif opt in ('-c', '--config'):
options['config_filename'] = arg
elif opt == '--no-ssl':
options['secure'] = False
elif opt == '--oauth':
options['oauth_filename'] = arg
elif opt == '--force-ansi':
options['force-ansi'] = True
if extra_args and not ('action' in options and options['action'] == 'help'):
options['action'] = extra_args[0]
options['extra_args'] = extra_args[1:]
def get_time_string(status, options, format="%a %b %d %H:%M:%S +0000 %Y"):
timestamp = options["timestamp"]
datestamp = options["datestamp"]
t = time.strptime(status['created_at'], format)
i_hate_timezones = time.timezone
if (time.daylight):
i_hate_timezones = time.altzone
dt = datetime.datetime(*t[:-3]) - datetime.timedelta(
seconds=i_hate_timezones)
t = dt.timetuple()
if timestamp and datestamp:
return time.strftime("%Y-%m-%d %H:%M:%S ", t)
elif timestamp:
return time.strftime("%H:%M:%S ", t)
elif datestamp:
return time.strftime("%Y-%m-%d ", t)
return ""
def reRepl(m):
ansiTypes = {
'clear': ansiFormatter.cmdReset(),
'hashtag': ansiFormatter.cmdBold(),
'profile': ansiFormatter.cmdUnderline(),
}
s = None
try:
mkey = m.lastgroup
if m.group(mkey):
s = '%s%s%s' % (ansiTypes[mkey], m.group(mkey), ansiTypes['clear'])
except IndexError:
pass
return s
def replaceInStatus(status):
txt = gHtmlParser.unescape(status)
txt = re.sub(hashtagRe, reRepl, txt)
txt = re.sub(profileRe, reRepl, txt)
return txt
class StatusFormatter(object):
def __call__(self, status, options):
return ("%s%s %s" % (
get_time_string(status, options),
status['user']['screen_name'], gHtmlParser.unescape(status['text'])))
class AnsiStatusFormatter(object):
def __init__(self):
self._colourMap = ansi.ColourMap()
def __call__(self, status, options):
colour = self._colourMap.colourFor(status['user']['screen_name'])
return ("%s%s%s%s %s" % (
get_time_string(status, options),
ansiFormatter.cmdColour(colour), status['user']['screen_name'],
ansiFormatter.cmdReset(), replaceInStatus(status['text'])))
class VerboseStatusFormatter(object):
def __call__(self, status, options):
return ("-- %s (%s) on %s\n%s\n" % (
status['user']['screen_name'],
status['user']['location'],
status['created_at'],
gHtmlParser.unescape(status['text'])))
class URLStatusFormatter(object):
urlmatch = re.compile(r'https?://\S+')
def __call__(self, status, options):
urls = self.urlmatch.findall(status['text'])
return '\n'.join(urls) if urls else ""
class ListsFormatter(object):
def __call__(self, list):
if list['description']:
list_str = "%-30s (%s)" % (list['name'], list['description'])
else:
list_str = "%-30s" % (list['name'])
return "%s\n" % list_str
class ListsVerboseFormatter(object):
def __call__(self, list):
list_str = "%-30s\n description: %s\n members: %s\n mode:%s\n" % (list['name'], list['description'], list['member_count'], list['mode'])
return list_str
class AnsiListsFormatter(object):
def __init__(self):
self._colourMap = ansi.ColourMap()
def __call__(self, list):
colour = self._colourMap.colourFor(list['name'])
return ("%s%-15s%s %s" % (
ansiFormatter.cmdColour(colour), list['name'],
ansiFormatter.cmdReset(), list['description']))
class AdminFormatter(object):
def __call__(self, action, user):
user_str = "%s (%s)" % (user['screen_name'], user['name'])
if action == "follow":
return "You are now following %s.\n" % (user_str)
else:
return "You are no longer following %s.\n" % (user_str)
class VerboseAdminFormatter(object):
def __call__(self, action, user):
return("-- %s: %s (%s): %s" % (
"Following" if action == "follow" else "Leaving",
user['screen_name'],
user['name'],
user['url']))
class SearchFormatter(object):
def __call__(self, result, options):
return("%s%s %s" % (
get_time_string(result, options, "%a, %d %b %Y %H:%M:%S +0000"),
result['from_user'], result['text']))
class VerboseSearchFormatter(SearchFormatter):
pass # Default to the regular one
class URLSearchFormatter(object):
urlmatch = re.compile(r'https?://\S+')
def __call__(self, result, options):
urls = self.urlmatch.findall(result['text'])
return '\n'.join(urls) if urls else ""
class AnsiSearchFormatter(object):
def __init__(self):
self._colourMap = ansi.ColourMap()
def __call__(self, result, options):
colour = self._colourMap.colourFor(result['from_user'])
return ("%s%s%s%s %s" % (
get_time_string(result, options, "%a, %d %b %Y %H:%M:%S +0000"),
ansiFormatter.cmdColour(colour), result['from_user'],
ansiFormatter.cmdReset(), result['text']))
_term_encoding = None
def get_term_encoding():
global _term_encoding
if not _term_encoding:
lang = os.getenv('LANG', 'unknown.UTF-8').split('.')
if lang[1:]:
_term_encoding = lang[1]
else:
_term_encoding = 'UTF-8'
return _term_encoding
formatters = {}
status_formatters = {
'default': StatusFormatter,
'verbose': VerboseStatusFormatter,
'urls': URLStatusFormatter,
'ansi': AnsiStatusFormatter
}
formatters['status'] = status_formatters
admin_formatters = {
'default': AdminFormatter,
'verbose': VerboseAdminFormatter,
'urls': AdminFormatter,
'ansi': AdminFormatter
}
formatters['admin'] = admin_formatters
search_formatters = {
'default': SearchFormatter,
'verbose': VerboseSearchFormatter,
'urls': URLSearchFormatter,
'ansi': AnsiSearchFormatter
}
formatters['search'] = search_formatters
lists_formatters = {
'default': ListsFormatter,
'verbose': ListsVerboseFormatter,
'urls': None,
'ansi': AnsiListsFormatter
}
formatters['lists'] = lists_formatters
def get_formatter(action_type, options):
formatters_dict = formatters.get(action_type)
if (not formatters_dict):
raise TwitterError(
"There was an error finding a class of formatters for your type (%s)"
% (action_type))
f = formatters_dict.get(options['format'])
if (not f):
raise TwitterError(
"Unknown formatter '%s' for status actions" % (options['format']))
return f()
class Action(object):
def ask(self, subject='perform this action', careful=False):
'''
Requests from the user, using `raw_input`, whether `subject` should be
performed. When `careful`, the default answer is NO, otherwise YES.
Returns the user answer in the form `True` or `False`.
'''
sample = '(y/N)'
if not careful:
sample = '(Y/n)'
prompt = 'You really want to %s %s? ' % (subject, sample)
try:
answer = input(prompt).lower()
if careful:
return answer in ('yes', 'y')
else:
return answer not in ('no', 'n')
except EOFError:
print(file=sys.stderr) # Put Newline since Enter was never pressed
# TODO:
# Figure out why on OS X the raw_input keeps raising
# EOFError and is never able to reset and get more input
# Hint: Look at how IPython implements their console
default = True
if careful:
default = False
return default
def __call__(self, twitter, options):
action = actions.get(options['action'], NoSuchAction)()
try:
doAction = lambda : action(twitter, options)
if (options['refresh'] and isinstance(action, StatusAction)):
while True:
doAction()
sys.stdout.flush()
time.sleep(options['refresh_rate'])
else:
doAction()
except KeyboardInterrupt:
print('\n[Keyboard Interrupt]', file=sys.stderr)
pass
class NoSuchActionError(Exception):
pass
class NoSuchAction(Action):
def __call__(self, twitter, options):
raise NoSuchActionError("No such action: %s" % (options['action']))
class StatusAction(Action):
def __call__(self, twitter, options):
statuses = self.getStatuses(twitter, options)
sf = get_formatter('status', options)
for status in statuses:
statusStr = sf(status, options)
if statusStr.strip():
printNicely(statusStr)
class SearchAction(Action):
def __call__(self, twitter, options):
# We need to be pointing at search.twitter.com to work, and it is less
# tangly to do it here than in the main()
twitter.domain = "search.twitter.com"
twitter.uriparts = ()
# We need to bypass the TwitterCall parameter encoding so that the
# plus sign is not encoded; therefore we encode the query ourselves
query_string = "+".join(
[quote(term)
for term in options['extra_args']])
results = twitter.search(q=query_string)['results']
f = get_formatter('search', options)
for result in results:
resultStr = f(result, options)
if resultStr.strip():
printNicely(resultStr)
class AdminAction(Action):
def __call__(self, twitter, options):
if not (options['extra_args'] and options['extra_args'][0]):
raise TwitterError("You need to specify a user (screen name)")
af = get_formatter('admin', options)
try:
user = self.getUser(twitter, options['extra_args'][0])
except TwitterError as e:
print("There was a problem following or leaving the specified user.")
print("You may be trying to follow a user you are already following;")
print("Leaving a user you are not currently following;")
print("Or the user may not exist.")
print("Sorry.")
print()
print(e)
else:
printNicely(af(options['action'], user))
class ListsAction(StatusAction):
def getStatuses(self, twitter, options):
if not options['extra_args']:
raise TwitterError("Please provide a user to query for lists")
screen_name = options['extra_args'][0]
if not options['extra_args'][1:]:
lists = twitter.lists.list(screen_name=screen_name)
if not lists:
printNicely("This user has no lists.")
for list in lists:
lf = get_formatter('lists', options)
printNicely(lf(list))
return []
else:
return reversed(twitter.user.lists.list.statuses(
user=screen_name, list=options['extra_args'][1]))
class MyListsAction(ListsAction):
def getStatuses(self, twitter, options):
screen_name = twitter.account.verify_credentials()['screen_name']
options['extra_args'].insert(0, screen_name)
return ListsAction.getStatuses(self, twitter, options)
class FriendsAction(StatusAction):
def getStatuses(self, twitter, options):
return reversed(twitter.statuses.home_timeline(count=options["length"]))
class RepliesAction(StatusAction):
def getStatuses(self, twitter, options):
return reversed(twitter.statuses.mentions_timeline(count=options["length"]))
class FollowAction(AdminAction):
def getUser(self, twitter, user):
return twitter.friendships.create(id=user)
class LeaveAction(AdminAction):
def getUser(self, twitter, user):
return twitter.friendships.destroy(id=user)
class SetStatusAction(Action):
def __call__(self, twitter, options):
statusTxt = (" ".join(options['extra_args'])
if options['extra_args']
else str(input("message: ")))
replies = []
ptr = re.compile("@[\w_]+")
while statusTxt:
s = ptr.match(statusTxt)
if s and s.start() == 0:
replies.append(statusTxt[s.start():s.end()])
statusTxt = statusTxt[s.end() + 1:]
else:
break
replies = " ".join(replies)
if len(replies) >= 140:
# just go back
statusTxt = replies
replies = ""
splitted = []
while statusTxt:
limit = 140 - len(replies)
if len(statusTxt) > limit:
end = string.rfind(statusTxt, ' ', 0, limit)
else:
end = limit
splitted.append(" ".join((replies, statusTxt[:end])))
statusTxt = statusTxt[end:]
if options['invert_split']:
splitted.reverse()
for status in splitted:
twitter.statuses.update(status=status)
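# SetStatusAction above collects the leading @mentions of the message,
# prefixes them to every chunk, and splits the remaining text at word
# boundaries so that each chunk (mentions included) stays roughly within the
# 140-character limit, reversing the chunk order when invert_split is set.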
class TwitterShell(Action):
def render_prompt(self, prompt):
'''Parses the `prompt` string and returns the rendered version'''
prompt = prompt.strip("'").replace("\\'", "'")
for colour in ansi.COLOURS_NAMED:
if '[%s]' % (colour) in prompt:
prompt = prompt.replace(
'[%s]' % (colour), ansiFormatter.cmdColourNamed(colour))
prompt = prompt.replace('[R]', ansiFormatter.cmdReset())
return prompt
def __call__(self, twitter, options):
prompt = self.render_prompt(options.get('prompt', 'twitter> '))
while True:
options['action'] = ""
try:
args = input(prompt).split()
parse_args(args, options)
if not options['action']:
continue
elif options['action'] == 'exit':
raise SystemExit(0)
elif options['action'] == 'shell':
print('Sorry Xzibit does not work here!', file=sys.stderr)
continue
elif options['action'] == 'help':
print('''\ntwitter> `action`\n
The Shell accepts all the command-line actions along with:
exit Leave the twitter shell (^D may also be used)
Full CMD Line help is appended below for your convenience.''', file=sys.stderr)
Action()(twitter, options)
options['action'] = ''
except NoSuchActionError as e:
print(e, file=sys.stderr)
except KeyboardInterrupt:
print('\n[Keyboard Interrupt]', file=sys.stderr)
except EOFError:
print(file=sys.stderr)
leaving = self.ask(subject='Leave')
if not leaving:
print('Excellent!', file=sys.stderr)
else:
raise SystemExit(0)
class PythonPromptAction(Action):
def __call__(self, twitter, options):
try:
while True:
smrt_input(globals(), locals())
except EOFError:
pass
class HelpAction(Action):
def __call__(self, twitter, options):
print(__doc__)
class DoNothingAction(Action):
def __call__(self, twitter, options):
pass
class RateLimitStatus(Action):
def __call__(self, twitter, options):
rate = twitter.account.rate_limit_status()
print("Remaining API requests: %s / %s (hourly limit)" % (rate['remaining_hits'], rate['hourly_limit']))
print("Next reset in %ss (%s)" % (int(rate['reset_time_in_seconds'] - time.time()),
time.asctime(time.localtime(rate['reset_time_in_seconds']))))
actions = {
'authorize' : DoNothingAction,
'follow' : FollowAction,
'friends' : FriendsAction,
'list' : ListsAction,
'mylist' : MyListsAction,
'help' : HelpAction,
'leave' : LeaveAction,
'pyprompt' : PythonPromptAction,
'replies' : RepliesAction,
'search' : SearchAction,
'set' : SetStatusAction,
'shell' : TwitterShell,
'rate' : RateLimitStatus,
}
def loadConfig(filename):
options = dict(OPTIONS)
if os.path.exists(filename):
cp = SafeConfigParser()
cp.read([filename])
for option in ('format', 'prompt'):
if cp.has_option('twitter', option):
options[option] = cp.get('twitter', option)
# process booleans
for option in ('invert_split',):
if cp.has_option('twitter', option):
options[option] = cp.getboolean('twitter', option)
return options
def main(args=sys.argv[1:]):
arg_options = {}
try:
parse_args(args, arg_options)
except GetoptError as e:
print("I can't do that, %s." % (e), file=sys.stderr)
print(file=sys.stderr)
raise SystemExit(1)
config_path = os.path.expanduser(
arg_options.get('config_filename') or OPTIONS.get('config_filename'))
config_options = loadConfig(config_path)
# Apply the various options in order, the most important applied last.
# Defaults first, then what's read from config file, then command-line
# arguments.
options = dict(OPTIONS)
for d in config_options, arg_options:
for k, v in list(d.items()):
if v: options[k] = v
if options['refresh'] and options['action'] not in (
'friends', 'replies'):
print("You can only refresh the friends or replies actions.", file=sys.stderr)
print("Use 'twitter -h' for help.", file=sys.stderr)
return 1
oauth_filename = os.path.expanduser(options['oauth_filename'])
if (options['action'] == 'authorize'
or not os.path.exists(oauth_filename)):
oauth_dance(
"the Command-Line Tool", CONSUMER_KEY, CONSUMER_SECRET,
options['oauth_filename'])
global ansiFormatter
ansiFormatter = ansi.AnsiCmd(options["force-ansi"])
oauth_token, oauth_token_secret = read_token_file(oauth_filename)
twitter = Twitter(
auth=OAuth(
oauth_token, oauth_token_secret, CONSUMER_KEY, CONSUMER_SECRET),
secure=options['secure'],
api_version='1.1',
domain='api.twitter.com')
try:
Action()(twitter, options)
except NoSuchActionError as e:
print(e, file=sys.stderr)
raise SystemExit(1)
except TwitterError as e:
print(str(e), file=sys.stderr)
print("Use 'twitter -h' for help.", file=sys.stderr)
raise SystemExit(1)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._packet_core_control_planes_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_resource_group_request, build_list_by_subscription_request, build_update_tags_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PacketCoreControlPlanesOperations:
"""PacketCoreControlPlanesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~mobile_network_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
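# As the class docstring notes, this operations group is normally reached
# through an attribute on the generated service client rather than being
# constructed by hand. A minimal sketch, assuming a hypothetical client class
# name and attribute (neither is defined in this file):
#
#   from azure.identity.aio import DefaultAzureCredential
#   client = MobileNetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   ops = client.packet_core_control_planes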
async def _delete_initial(
self,
resource_group_name: str,
packet_core_control_plane_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
packet_core_control_plane_name=packet_core_control_plane_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
packet_core_control_plane_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified packet core control plane.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param packet_core_control_plane_name: The name of the packet core control plane.
:type packet_core_control_plane_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
packet_core_control_plane_name=packet_core_control_plane_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}'} # type: ignore
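# A minimal usage sketch for the long-running delete above, assuming `ops` is
# an already-constructed PacketCoreControlPlanesOperations instance and the
# resource names are placeholders:
#
#   poller = await ops.begin_delete("my-resource-group", "my-packet-core-cp")
#   await poller.result()  # waits until the LRO reaches a terminal state
#
# Pass polling=False to skip polling, or continuation_token=... to resume a
# previously saved poller, as documented in the docstring above.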
@distributed_trace_async
async def get(
self,
resource_group_name: str,
packet_core_control_plane_name: str,
**kwargs: Any
) -> "_models.PacketCoreControlPlane":
"""Gets information about the specified packet core control plane.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param packet_core_control_plane_name: The name of the packet core control plane.
:type packet_core_control_plane_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PacketCoreControlPlane, or the result of cls(response)
:rtype: ~mobile_network_management_client.models.PacketCoreControlPlane
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCoreControlPlane"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
packet_core_control_plane_name=packet_core_control_plane_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCoreControlPlane', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
packet_core_control_plane_name: str,
parameters: "_models.PacketCoreControlPlane",
**kwargs: Any
) -> "_models.PacketCoreControlPlane":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCoreControlPlane"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PacketCoreControlPlane')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
packet_core_control_plane_name=packet_core_control_plane_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PacketCoreControlPlane', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PacketCoreControlPlane', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
packet_core_control_plane_name: str,
parameters: "_models.PacketCoreControlPlane",
**kwargs: Any
) -> AsyncLROPoller["_models.PacketCoreControlPlane"]:
"""Creates or updates a PacketCoreControlPlane.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param packet_core_control_plane_name: The name of the packet core control plane.
:type packet_core_control_plane_name: str
:param parameters: Parameters supplied to the create or update packet core control plane
operation.
:type parameters: ~mobile_network_management_client.models.PacketCoreControlPlane
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PacketCoreControlPlane or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~mobile_network_management_client.models.PacketCoreControlPlane]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCoreControlPlane"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
packet_core_control_plane_name=packet_core_control_plane_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PacketCoreControlPlane', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}'} # type: ignore
@distributed_trace_async
async def update_tags(
self,
resource_group_name: str,
packet_core_control_plane_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.PacketCoreControlPlane":
"""Updates a PacketCoreControlPlane update tags.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param packet_core_control_plane_name: The name of the packet core control plane.
:type packet_core_control_plane_name: str
:param parameters: Parameters supplied to update PacketCoreControlPlane tags.
:type parameters: ~mobile_network_management_client.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PacketCoreControlPlane, or the result of cls(response)
:rtype: ~mobile_network_management_client.models.PacketCoreControlPlane
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCoreControlPlane"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'TagsObject')
request = build_update_tags_request(
resource_group_name=resource_group_name,
packet_core_control_plane_name=packet_core_control_plane_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update_tags.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCoreControlPlane', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
**kwargs: Any
) -> AsyncIterable["_models.PacketCoreControlPlaneListResult"]:
"""Lists all the packetCoreControlPlanes in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PacketCoreControlPlaneListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~mobile_network_management_client.models.PacketCoreControlPlaneListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCoreControlPlaneListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PacketCoreControlPlaneListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PacketCoreControlPlaneListResult"]:
"""Lists all the packetCoreControlPlanes in a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PacketCoreControlPlaneListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~mobile_network_management_client.models.PacketCoreControlPlaneListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCoreControlPlaneListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PacketCoreControlPlaneListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes'} # type: ignore
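# A minimal usage sketch for the paged listings above, assuming `ops` is an
# already-constructed PacketCoreControlPlanesOperations instance and the
# resource group name is a placeholder:
#
#   async for plane in ops.list_by_resource_group("my-resource-group"):
#       print(plane.name)
#
# AsyncItemPaged follows `next_link` internally via get_next()/extract_data(),
# so callers iterate over PacketCoreControlPlane items rather than raw pages.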
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extensions supporting Federation."""
from keystone.auth import controllers as auth_controllers
from keystone.common import authorization
from keystone.common import controller
from keystone.common import dependency
from keystone.common import validation
from keystone.common import wsgi
from keystone import config
from keystone.contrib.federation import idp as keystone_idp
from keystone.contrib.federation import schema
from keystone.contrib.federation import utils
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
CONF = config.CONF
class _ControllerBase(controller.V3Controller):
"""Base behaviors for federation controllers."""
@classmethod
def base_url(cls, context, path=None):
"""Construct a path and pass it to V3Controller.base_url method."""
path = '/OS-FEDERATION/' + cls.collection_name
return super(_ControllerBase, cls).base_url(context, path=path)
@dependency.requires('federation_api')
class IdentityProvider(_ControllerBase):
"""Identity Provider representation."""
collection_name = 'identity_providers'
member_name = 'identity_provider'
_mutable_parameters = frozenset(['description', 'enabled'])
_public_parameters = frozenset(['id', 'enabled', 'description', 'links'])
@classmethod
def _add_related_links(cls, context, ref):
"""Add URLs for entities related with Identity Provider.
Add URLs pointing to:
- protocols tied to the Identity Provider
"""
ref.setdefault('links', {})
base_path = ref['links'].get('self')
if base_path is None:
base_path = '/'.join([IdentityProvider.base_url(context),
ref['id']])
for name in ['protocols']:
ref['links'][name] = '/'.join([base_path, name])
@classmethod
def _add_self_referential_link(cls, context, ref):
id = ref.get('id')
self_path = '/'.join([cls.base_url(context), id])
ref.setdefault('links', {})
ref['links']['self'] = self_path
@classmethod
def wrap_member(cls, context, ref):
cls._add_self_referential_link(context, ref)
cls._add_related_links(context, ref)
ref = cls.filter_params(ref)
return {cls.member_name: ref}
@controller.protected()
def create_identity_provider(self, context, idp_id, identity_provider):
identity_provider = self._normalize_dict(identity_provider)
identity_provider.setdefault('enabled', False)
IdentityProvider.check_immutable_params(identity_provider)
idp_ref = self.federation_api.create_idp(idp_id, identity_provider)
response = IdentityProvider.wrap_member(context, idp_ref)
return wsgi.render_response(body=response, status=('201', 'Created'))
@controller.protected()
def list_identity_providers(self, context):
ref = self.federation_api.list_idps()
ref = [self.filter_params(x) for x in ref]
return IdentityProvider.wrap_collection(context, ref)
@controller.protected()
def get_identity_provider(self, context, idp_id):
ref = self.federation_api.get_idp(idp_id)
return IdentityProvider.wrap_member(context, ref)
@controller.protected()
def delete_identity_provider(self, context, idp_id):
self.federation_api.delete_idp(idp_id)
@controller.protected()
def update_identity_provider(self, context, idp_id, identity_provider):
identity_provider = self._normalize_dict(identity_provider)
IdentityProvider.check_immutable_params(identity_provider)
idp_ref = self.federation_api.update_idp(idp_id, identity_provider)
return IdentityProvider.wrap_member(context, idp_ref)
@dependency.requires('federation_api')
class FederationProtocol(_ControllerBase):
"""A federation protocol representation.
See IdentityProvider docstring for explanation on _mutable_parameters
and _public_parameters class attributes.
"""
collection_name = 'protocols'
member_name = 'protocol'
_public_parameters = frozenset(['id', 'mapping_id', 'links'])
_mutable_parameters = frozenset(['mapping_id'])
@classmethod
def _add_self_referential_link(cls, context, ref):
"""Add 'links' entry to the response dictionary.
Calls the IdentityProvider.base_url() class method, as it constructs the
proper URL with the 'identity_providers' part included.
:param ref: response dictionary
"""
ref.setdefault('links', {})
base_path = ref['links'].get('identity_provider')
if base_path is None:
base_path = [IdentityProvider.base_url(context), ref['idp_id']]
base_path = '/'.join(base_path)
self_path = [base_path, 'protocols', ref['id']]
self_path = '/'.join(self_path)
ref['links']['self'] = self_path
@classmethod
def _add_related_links(cls, context, ref):
"""Add new entries to the 'links' subdictionary in the response.
Adds 'identity_provider' key with URL pointing to related identity
provider as a value.
:param ref: response dictionary
"""
ref.setdefault('links', {})
base_path = '/'.join([IdentityProvider.base_url(context),
ref['idp_id']])
ref['links']['identity_provider'] = base_path
@classmethod
def wrap_member(cls, context, ref):
cls._add_related_links(context, ref)
cls._add_self_referential_link(context, ref)
ref = cls.filter_params(ref)
return {cls.member_name: ref}
@controller.protected()
def create_protocol(self, context, idp_id, protocol_id, protocol):
ref = self._normalize_dict(protocol)
FederationProtocol.check_immutable_params(ref)
ref = self.federation_api.create_protocol(idp_id, protocol_id, ref)
response = FederationProtocol.wrap_member(context, ref)
return wsgi.render_response(body=response, status=('201', 'Created'))
@controller.protected()
def update_protocol(self, context, idp_id, protocol_id, protocol):
ref = self._normalize_dict(protocol)
FederationProtocol.check_immutable_params(ref)
ref = self.federation_api.update_protocol(idp_id, protocol_id,
protocol)
return FederationProtocol.wrap_member(context, ref)
@controller.protected()
def get_protocol(self, context, idp_id, protocol_id):
ref = self.federation_api.get_protocol(idp_id, protocol_id)
return FederationProtocol.wrap_member(context, ref)
@controller.protected()
def list_protocols(self, context, idp_id):
protocols_ref = self.federation_api.list_protocols(idp_id)
protocols = list(protocols_ref)
return FederationProtocol.wrap_collection(context, protocols)
@controller.protected()
def delete_protocol(self, context, idp_id, protocol_id):
self.federation_api.delete_protocol(idp_id, protocol_id)
@dependency.requires('federation_api')
class MappingController(_ControllerBase):
collection_name = 'mappings'
member_name = 'mapping'
@controller.protected()
def create_mapping(self, context, mapping_id, mapping):
ref = self._normalize_dict(mapping)
utils.validate_mapping_structure(ref)
mapping_ref = self.federation_api.create_mapping(mapping_id, ref)
response = MappingController.wrap_member(context, mapping_ref)
return wsgi.render_response(body=response, status=('201', 'Created'))
@controller.protected()
def list_mappings(self, context):
ref = self.federation_api.list_mappings()
return MappingController.wrap_collection(context, ref)
@controller.protected()
def get_mapping(self, context, mapping_id):
ref = self.federation_api.get_mapping(mapping_id)
return MappingController.wrap_member(context, ref)
@controller.protected()
def delete_mapping(self, context, mapping_id):
self.federation_api.delete_mapping(mapping_id)
@controller.protected()
def update_mapping(self, context, mapping_id, mapping):
mapping = self._normalize_dict(mapping)
utils.validate_mapping_structure(mapping)
mapping_ref = self.federation_api.update_mapping(mapping_id, mapping)
return MappingController.wrap_member(context, mapping_ref)
class Auth(auth_controllers.Auth):
def federated_authentication(self, context, identity_provider, protocol):
"""Authenticate from dedicated url endpoint.
Build HTTP request body for federated authentication and inject
it into the ``authenticate_for_token`` function.
"""
auth = {
'identity': {
'methods': [protocol],
protocol: {
'identity_provider': identity_provider,
'protocol': protocol
}
}
}
return self.authenticate_for_token(context, auth=auth)
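# For a hypothetical call with identity_provider='myidp' and protocol='saml2',
# the body built by federated_authentication() above is equivalent to:
#
#   {'identity': {'methods': ['saml2'],
#                 'saml2': {'identity_provider': 'myidp',
#                           'protocol': 'saml2'}}}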
@validation.validated(schema.saml_create, 'auth')
def create_saml_assertion(self, context, auth):
"""Exchange a scoped token for a SAML assertion.
:param auth: Dictionary that contains a token id and region id
:returns: SAML Assertion based on properties from the token
"""
issuer = wsgi.Application.base_url(context, 'public')
region_id = auth['scope']['region']['id']
region = self.catalog_api.get_region(region_id)
recipient = region['url']
token_id = auth['identity']['token']['id']
token_data = self.token_provider_api.validate_token(token_id)
token_ref = token_model.KeystoneToken(token_id, token_data)
subject = token_ref.user_name
roles = token_ref.role_names
if token_ref.project_scoped:
project = token_ref.project_name
else:
raise ValueError(_('Use a project scoped token when attempting to '
                   'create a SAML assertion'))
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(issuer, recipient, subject, roles,
project)
return wsgi.render_response(body=response.to_string(),
status=('200', 'OK'),
headers=[('Content-Type', 'text/xml')])
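# create_saml_assertion() only dereferences two values from the validated
# `auth` block: the token id and the region id. A hypothetical minimal shape
# (ids are placeholders; schema.saml_create may require additional keys):
#
#   {'identity': {'token': {'id': '<token-id>'}},
#    'scope': {'region': {'id': '<region-id>'}}}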
@dependency.requires('assignment_api')
class DomainV3(controller.V3Controller):
collection_name = 'domains'
member_name = 'domain'
def __init__(self):
super(DomainV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_domain
@controller.protected()
def list_domains_for_groups(self, context):
"""List all domains available to an authenticated user's groups.
:param context: request context
:returns: list of accessible domains
"""
auth_context = context['environment'][authorization.AUTH_CONTEXT_ENV]
domains = self.assignment_api.list_domains_for_groups(
auth_context['group_ids'])
return DomainV3.wrap_collection(context, domains)
@dependency.requires('assignment_api')
class ProjectV3(controller.V3Controller):
collection_name = 'projects'
member_name = 'project'
def __init__(self):
super(ProjectV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_project
@controller.protected()
def list_projects_for_groups(self, context):
"""List all projects available to an authenticated user's groups.
:param context: request context
:returns: list of accessible projects
"""
auth_context = context['environment'][authorization.AUTH_CONTEXT_ENV]
projects = self.assignment_api.list_projects_for_groups(
auth_context['group_ids'])
return ProjectV3.wrap_collection(context, projects)
class SAMLMetadataV3(_ControllerBase):
member_name = 'metadata'
def get_metadata(self, context):
metadata_path = CONF.federation.idp_metadata_path
try:
with open(metadata_path, 'r') as metadata_handler:
metadata = metadata_handler.read()
except IOError as e:
# Raise HTTP 500 in case Metadata file cannot be read.
raise exception.MetadataFileError(reason=e)
return wsgi.render_response(body=metadata, status=('200', 'OK'),
headers=[('Content-Type', 'text/xml')])
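# get_metadata() serves whatever file CONF.federation.idp_metadata_path points
# at. A hypothetical keystone.conf fragment wiring this up (the path is a
# placeholder):
#
#   [federation]
#   idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml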
|
|
# -*- coding: utf-8 -*-
import mock
import unittest
from nose.tools import * # noqa
from github3 import GitHubError
from github3.repos import Repository
from tests.base import OsfTestCase, get_default_metaschema
from tests.factories import ExternalAccountFactory, ProjectFactory, UserFactory
from framework.auth import Auth
from website.addons.github.exceptions import NotFoundError
from website.addons.github import settings as github_settings
from website.addons.github.model import GitHubUserSettings
from website.addons.github.model import GitHubNodeSettings
from website.addons.github.tests.factories import (
GitHubAccountFactory,
GitHubNodeSettingsFactory,
GitHubUserSettingsFactory
)
from website.addons.base.testing import models
from .utils import create_mock_github
mock_github = create_mock_github()
class TestNodeSettings(models.OAuthAddonNodeSettingsTestSuiteMixin, OsfTestCase):
short_name = 'github'
full_name = 'GitHub'
ExternalAccountFactory = GitHubAccountFactory
NodeSettingsFactory = GitHubNodeSettingsFactory
NodeSettingsClass = GitHubNodeSettings
UserSettingsFactory = GitHubUserSettingsFactory
## Mixin Overrides ##
def _node_settings_class_kwargs(self, node, user_settings):
return {
'user_settings': self.user_settings,
'repo': 'mock',
'user': 'abc',
'owner': self.node
}
def test_set_folder(self):
# GitHub doesn't use folderpicker, and the nodesettings model
# does not need a `set_repo` method
pass
def test_serialize_settings(self):
# GitHub's serialized_settings are a little different from
# common storage addons.
settings = self.node_settings.serialize_waterbutler_settings()
expected = {'owner': self.node_settings.user, 'repo': self.node_settings.repo}
assert_equal(settings, expected)
@mock.patch(
'website.addons.github.model.GitHubUserSettings.revoke_remote_oauth_access',
mock.PropertyMock()
)
def test_complete_has_auth_not_verified(self):
super(TestNodeSettings, self).test_complete_has_auth_not_verified()
@mock.patch('website.addons.github.api.GitHubClient.repos')
@mock.patch('website.addons.github.api.GitHubClient.my_org_repos')
def test_to_json(self, mock_org, mock_repos):
mock_repos.return_value = {}
mock_org.return_value = {}
super(TestNodeSettings, self).test_to_json()
@mock.patch('website.addons.github.api.GitHubClient.repos')
@mock.patch('website.addons.github.api.GitHubClient.my_org_repos')
def test_to_json_user_is_owner(self, mock_org, mock_repos):
mock_repos.return_value = {}
mock_org.return_value = {}
result = self.node_settings.to_json(self.user)
assert_true(result['user_has_auth'])
assert_equal(result['github_user'], 'abc')
assert_true(result['is_owner'])
assert_true(result['valid_credentials'])
assert_equal(result.get('repo_names', None), [])
@mock.patch('website.addons.github.api.GitHubClient.repos')
@mock.patch('website.addons.github.api.GitHubClient.my_org_repos')
def test_to_json_user_is_not_owner(self, mock_org, mock_repos):
mock_repos.return_value = {}
mock_org.return_value = {}
not_owner = UserFactory()
result = self.node_settings.to_json(not_owner)
assert_false(result['user_has_auth'])
assert_equal(result['github_user'], 'abc')
assert_false(result['is_owner'])
assert_true(result['valid_credentials'])
assert_equal(result.get('repo_names', None), None)
class TestUserSettings(models.OAuthAddonUserSettingTestSuiteMixin, OsfTestCase):
short_name = 'github'
full_name = 'GitHub'
ExternalAccountFactory = GitHubAccountFactory
def test_public_id(self):
assert_equal(self.user.external_accounts[0].display_name, self.user_settings.public_id)
class TestCallbacks(OsfTestCase):
def setUp(self):
super(TestCallbacks, self).setUp()
self.project = ProjectFactory.build()
self.consolidated_auth = Auth(self.project.creator)
self.non_authenticator = UserFactory()
self.project.save()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.add_addon('github', auth=self.consolidated_auth)
self.project.creator.add_addon('github')
self.external_account = GitHubAccountFactory()
self.project.creator.external_accounts.append(self.external_account)
self.project.creator.save()
self.node_settings = self.project.get_addon('github')
self.user_settings = self.project.creator.get_addon('github')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.external_account = self.external_account
self.node_settings.save()
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_make_public(self, mock_repo):
mock_repo.side_effect = NotFoundError
result = self.node_settings.before_make_public(self.project)
assert_is(result, None)
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_public_gh_public(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json({'private': False})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_public_gh_private(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json({'private': True})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_private_gh_public(self, mock_repo):
mock_repo.return_value = Repository.from_json({'private': False})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_private_gh_private(self, mock_repo):
mock_repo.return_value = Repository.from_json({'private': True})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
def test_before_page_load_not_contributor(self):
message = self.node_settings.before_page_load(self.project, UserFactory())
assert_false(message)
def test_before_page_load_not_logged_in(self):
message = self.node_settings.before_page_load(self.project, None)
assert_false(message)
def test_before_remove_contributor_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.project.creator
)
assert_true(message)
def test_before_remove_contributor_not_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.non_authenticator
)
assert_false(message)
def test_after_remove_contributor_authenticator_self(self):
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, self.consolidated_auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_not_in("You can re-authenticate", message)
def test_after_remove_contributor_authenticator_not_self(self):
auth = Auth(user=self.non_authenticator)
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_in("You can re-authenticate", message)
def test_after_remove_contributor_not_authenticator(self):
self.node_settings.after_remove_contributor(
self.project, self.non_authenticator, self.consolidated_auth
)
assert_not_equal(
self.node_settings.user_settings,
None,
)
def test_after_fork_authenticator(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
self.project, fork, self.project.creator,
)
assert_equal(
self.node_settings.user_settings,
clone.user_settings,
)
def test_after_fork_not_authenticator(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
self.project, fork, self.non_authenticator,
)
assert_equal(
clone.user_settings,
None,
)
def test_after_delete(self):
self.project.remove_node(Auth(user=self.project.creator))
# Ensure that changes to node settings have been saved
self.node_settings.reload()
assert_true(self.node_settings.user_settings is None)
@mock.patch('website.archiver.tasks.archive')
def test_does_not_get_copied_to_registrations(self, mock_archive):
registration = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(user=self.project.creator),
data='hodor',
)
assert_false(registration.has_addon('github'))
class TestGithubNodeSettings(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = UserFactory()
self.user.add_addon('github')
self.user_settings = self.user.get_addon('github')
self.external_account = GitHubAccountFactory()
self.user_settings.owner.external_accounts.append(self.external_account)
self.user_settings.owner.save()
self.node_settings = GitHubNodeSettingsFactory(user_settings=self.user_settings)
@mock.patch('website.addons.github.api.GitHubClient.delete_hook')
def test_delete_hook(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_true(res)
mock_delete_hook.assert_called_with(*args)
@mock.patch('website.addons.github.api.GitHubClient.delete_hook')
def test_delete_hook_no_hook(self, mock_delete_hook):
res = self.node_settings.delete_hook()
assert_false(res)
assert_false(mock_delete_hook.called)
@mock.patch('website.addons.github.api.GitHubClient.delete_hook')
def test_delete_hook_not_found(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
mock_delete_hook.side_effect = NotFoundError
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_false(res)
mock_delete_hook.assert_called_with(*args)
@mock.patch('website.addons.github.api.GitHubClient.delete_hook')
def test_delete_hook_error(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
mock_delete_hook.side_effect = GitHubError(mock.Mock())
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_false(res)
mock_delete_hook.assert_called_with(*args)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutronclient.common.exceptions import NeutronClientException
from neutronclient.v2_0 import client as neutronclient
from novaclient.v1_1 import security_group_rules as nova_sgr
from novaclient.v1_1 import security_groups as nova_sg
from heat.common import exception
from heat.common import template_format
from heat.engine import clients
from heat.engine import parser
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests.fakes import FakeKeystoneClient
from heat.tests import utils
from heat.tests.v1_1 import fakes
class SecurityGroupTest(HeatTestCase):
test_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_sg:
Type: OS::Neutron::SecurityGroup
Properties:
description: HTTP and SSH access
rules:
- port_range_min: 22
port_range_max: 22
remote_ip_prefix: 0.0.0.0/0
protocol: tcp
- port_range_min: 80
port_range_max: 80
protocol: tcp
remote_ip_prefix: 0.0.0.0/0
- remote_mode: remote_group_id
remote_group_id: wwww
protocol: tcp
- direction: egress
port_range_min: 22
port_range_max: 22
protocol: tcp
remote_ip_prefix: 10.0.1.0/24
- direction: egress
remote_mode: remote_group_id
remote_group_id: xxxx
- direction: egress
remote_mode: remote_group_id
'''
test_template_update = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_sg:
Type: OS::Neutron::SecurityGroup
Properties:
description: SSH access for private network
name: myrules
rules:
- port_range_min: 22
port_range_max: 22
remote_ip_prefix: 10.0.0.10/24
protocol: tcp
'''
test_template_validate = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_sg:
Type: OS::Neutron::SecurityGroup
Properties:
name: default
'''
def setUp(self):
super(SecurityGroupTest, self).setUp()
self.fc = fakes.FakeClient()
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
self.m.StubOutWithMock(nova_sgr.SecurityGroupRuleManager, 'create')
self.m.StubOutWithMock(nova_sgr.SecurityGroupRuleManager, 'delete')
self.m.StubOutWithMock(nova_sg.SecurityGroupManager, 'create')
self.m.StubOutWithMock(nova_sg.SecurityGroupManager, 'delete')
self.m.StubOutWithMock(nova_sg.SecurityGroupManager, 'get')
self.m.StubOutWithMock(nova_sg.SecurityGroupManager, 'list')
utils.setup_dummy_db()
self.m.StubOutWithMock(neutronclient.Client, 'create_security_group')
self.m.StubOutWithMock(
neutronclient.Client, 'create_security_group_rule')
self.m.StubOutWithMock(neutronclient.Client, 'show_security_group')
self.m.StubOutWithMock(
neutronclient.Client, 'delete_security_group_rule')
self.m.StubOutWithMock(neutronclient.Client, 'delete_security_group')
self.m.StubOutWithMock(neutronclient.Client, 'update_security_group')
def create_stack(self, template):
t = template_format.parse(template)
self.stack = self.parse_stack(t)
self.assertIsNone(self.stack.create())
return self.stack
def parse_stack(self, t):
stack_name = 'test_stack'
tmpl = parser.Template(t)
stack = parser.Stack(utils.dummy_context(), stack_name, tmpl)
stack.store()
return stack
def assertResourceState(self, rsrc, ref_id, metadata={}):
self.assertIsNone(rsrc.validate())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(ref_id, rsrc.FnGetRefId())
self.assertEqual(metadata, dict(rsrc.metadata))
@utils.stack_delete_after
def test_security_group(self):
show_created = {'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'sc1',
'description': '',
'security_group_rules': [{
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': '22',
'id': 'bbbb',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '22'
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': '80',
'id': 'cccc',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '80'
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': None,
'id': 'dddd',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}, {
'direction': 'egress',
'protocol': 'tcp',
'port_range_max': '22',
'id': 'eeee',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '22'
}, {
'direction': 'egress',
'protocol': None,
'port_range_max': None,
'id': 'ffff',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': 'xxxx',
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}, {
'direction': 'egress',
'protocol': None,
'port_range_max': None,
'id': 'gggg',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': 'aaaa',
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}],
'id': 'aaaa'}
}
#create script
clients.OpenStackClients.keystone().AndReturn(
FakeKeystoneClient())
sg_name = utils.PhysName('test_stack', 'the_sg')
neutronclient.Client.create_security_group({
'security_group': {
'name': sg_name,
'description': 'HTTP and SSH access'
}
}).AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': sg_name,
'description': 'HTTP and SSH access',
'security_group_rules': [{
"direction": "egress",
"ethertype": "IPv4",
"id": "aaaa-1",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "aaaa",
"tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
}, {
"direction": "egress",
"ethertype": "IPv6",
"id": "aaaa-2",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "aaaa",
"tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
}],
'id': 'aaaa'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'bbbb'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '80',
'ethertype': 'IPv4',
'port_range_max': '80',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '80',
'ethertype': 'IPv4',
'port_range_max': '80',
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'cccc'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'dddd'
}
})
neutronclient.Client.show_security_group('aaaa').AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': sg_name,
'description': 'HTTP and SSH access',
'security_group_rules': [{
"direction": "egress",
"ethertype": "IPv4",
"id": "aaaa-1",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "aaaa",
"tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
}, {
"direction": "egress",
"ethertype": "IPv6",
"id": "aaaa-2",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "aaaa",
"tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
}],
'id': 'aaaa'
}
})
neutronclient.Client.delete_security_group_rule('aaaa-1').AndReturn(
None)
neutronclient.Client.delete_security_group_rule('aaaa-2').AndReturn(
None)
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'eeee'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'xxxx',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'xxxx',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa',
'id': 'ffff'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'aaaa',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'aaaa',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa',
'id': 'gggg'
}
})
# update script
neutronclient.Client.update_security_group(
'aaaa',
{'security_group': {
'description': 'SSH access for private network',
'name': 'myrules'}}
).AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'myrules',
'description': 'SSH access for private network',
'security_group_rules': [],
'id': 'aaaa'
}
})
neutronclient.Client.show_security_group('aaaa').AndReturn(
show_created)
neutronclient.Client.delete_security_group_rule('bbbb').AndReturn(None)
neutronclient.Client.delete_security_group_rule('cccc').AndReturn(None)
neutronclient.Client.delete_security_group_rule('dddd').AndReturn(None)
neutronclient.Client.delete_security_group_rule('eeee').AndReturn(None)
neutronclient.Client.delete_security_group_rule('ffff').AndReturn(None)
neutronclient.Client.delete_security_group_rule('gggg').AndReturn(None)
neutronclient.Client.show_security_group('aaaa').AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'sc1',
'description': '',
'security_group_rules': [],
'id': 'aaaa'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
}
}).AndReturn({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa',
'id': 'hhhh'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'ethertype': 'IPv6',
'security_group_id': 'aaaa',
}
}).AndReturn({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv6',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa',
'id': 'iiii'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '10.0.0.10/24',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '10.0.0.10/24',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'jjjj'
}
})
# delete script
neutronclient.Client.show_security_group('aaaa').AndReturn(
show_created)
neutronclient.Client.delete_security_group_rule('bbbb').AndReturn(None)
neutronclient.Client.delete_security_group_rule('cccc').AndReturn(None)
neutronclient.Client.delete_security_group_rule('dddd').AndReturn(None)
neutronclient.Client.delete_security_group_rule('eeee').AndReturn(None)
neutronclient.Client.delete_security_group_rule('ffff').AndReturn(None)
neutronclient.Client.delete_security_group_rule('gggg').AndReturn(None)
neutronclient.Client.delete_security_group('aaaa').AndReturn(None)
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
sg = stack['the_sg']
self.assertResourceState(sg, 'aaaa')
updated_tmpl = template_format.parse(self.test_template_update)
updated_stack = utils.parse_stack(updated_tmpl)
stack.update(updated_stack)
stack.delete()
self.m.VerifyAll()
@utils.stack_delete_after
def test_security_group_exception(self):
#create script
clients.OpenStackClients.keystone().AndReturn(
FakeKeystoneClient())
sg_name = utils.PhysName('test_stack', 'the_sg')
neutronclient.Client.create_security_group({
'security_group': {
'name': sg_name,
'description': 'HTTP and SSH access'
}
}).AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': sg_name,
'description': 'HTTP and SSH access',
'security_group_rules': [],
'id': 'aaaa'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '80',
'ethertype': 'IPv4',
'port_range_max': '80',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
neutronclient.Client.show_security_group('aaaa').AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': sg_name,
'description': 'HTTP and SSH access',
'security_group_rules': [],
'id': 'aaaa'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'xxxx',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'aaaa',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
# delete script
neutronclient.Client.show_security_group('aaaa').AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'sc1',
'description': '',
'security_group_rules': [{
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': '22',
'id': 'bbbb',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '22'
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': '80',
'id': 'cccc',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '80'
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': None,
'id': 'dddd',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}, {
'direction': 'egress',
'protocol': 'tcp',
'port_range_max': '22',
'id': 'eeee',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '22'
}, {
'direction': 'egress',
'protocol': None,
'port_range_max': None,
'id': 'ffff',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': 'xxxx',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}, {
'direction': 'egress',
'protocol': None,
'port_range_max': None,
'id': 'gggg',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': 'aaaa',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}],
'id': 'aaaa'}})
neutronclient.Client.delete_security_group_rule('bbbb').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group_rule('cccc').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group_rule('dddd').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group_rule('eeee').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group_rule('ffff').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group_rule('gggg').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group('aaaa').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.show_security_group('aaaa').AndRaise(
NeutronClientException(status_code=404))
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
sg = stack['the_sg']
self.assertResourceState(sg, 'aaaa')
scheduler.TaskRunner(sg.delete)()
sg.state_set(sg.CREATE, sg.COMPLETE, 'to delete again')
sg.resource_id = 'aaaa'
stack.delete()
self.m.VerifyAll()
@utils.stack_delete_after
def test_security_group_validate(self):
stack = self.create_stack(self.test_template_validate)
sg = stack['the_sg']
ex = self.assertRaises(exception.StackValidationFailed, sg.validate)
self.assertEqual(
'Security groups cannot be assigned the name "default".',
ex.message)
|
|
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseGetitemTests(BaseExtensionTests):
"""Tests for ExtensionArray.__getitem__."""
def test_iloc_series(self, data):
ser = pd.Series(data)
result = ser.iloc[:4]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.iloc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_iloc_frame(self, data):
df = pd.DataFrame({"A": data, 'B':
np.arange(len(data), dtype='int64')})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.iloc[:4, [0]]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.iloc[[0, 1, 2, 3], [0]]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name='A')
# slice -> series
result = df.iloc[:4, 0]
self.assert_series_equal(result, expected)
        # sequence -> series
        result = df.iloc[[0, 1, 2, 3], 0]
        self.assert_series_equal(result, expected)
def test_loc_series(self, data):
ser = pd.Series(data)
result = ser.loc[:3]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.loc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_loc_frame(self, data):
df = pd.DataFrame({"A": data,
'B': np.arange(len(data), dtype='int64')})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.loc[:3, ['A']]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.loc[[0, 1, 2, 3], ['A']]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name='A')
# slice -> series
result = df.loc[:3, 'A']
self.assert_series_equal(result, expected)
        # sequence -> series
        result = df.loc[[0, 1, 2, 3], 'A']
        self.assert_series_equal(result, expected)
def test_getitem_scalar(self, data):
result = data[0]
assert isinstance(result, data.dtype.type)
result = pd.Series(data)[0]
assert isinstance(result, data.dtype.type)
def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
result = data_missing[0]
assert na_cmp(result, na_value)
def test_getitem_mask(self, data):
# Empty mask, raw array
mask = np.zeros(len(data), dtype=bool)
result = data[mask]
assert len(result) == 0
assert isinstance(result, type(data))
# Empty mask, in series
mask = np.zeros(len(data), dtype=bool)
result = pd.Series(data)[mask]
assert len(result) == 0
assert result.dtype == data.dtype
# non-empty mask, raw array
mask[0] = True
result = data[mask]
assert len(result) == 1
assert isinstance(result, type(data))
# non-empty mask, in series
result = pd.Series(data)[mask]
assert len(result) == 1
assert result.dtype == data.dtype
def test_getitem_slice(self, data):
# getitem[slice] should return an array
result = data[slice(0)] # empty
assert isinstance(result, type(data))
result = data[slice(1)] # scalar
assert isinstance(result, type(data))
def test_get(self, data):
# GH 20882
s = pd.Series(data, index=[2 * i for i in range(len(data))])
assert s.get(4) == s.iloc[2]
result = s.get([4, 6])
expected = s.iloc[[2, 3]]
self.assert_series_equal(result, expected)
result = s.get(slice(2))
expected = s.iloc[[0, 1]]
self.assert_series_equal(result, expected)
assert s.get(-1) is None
assert s.get(s.index.max() + 1) is None
s = pd.Series(data[:6], index=list('abcdef'))
assert s.get('c') == s.iloc[2]
result = s.get(slice('b', 'd'))
expected = s.iloc[[1, 2, 3]]
self.assert_series_equal(result, expected)
result = s.get('Z')
assert result is None
assert s.get(4) == s.iloc[4]
assert s.get(-1) == s.iloc[-1]
assert s.get(len(s)) is None
# GH 21257
s = pd.Series(data)
s2 = s[::2]
assert s2.get(1) is None
def test_take_sequence(self, data):
result = pd.Series(data)[[0, 1, 3]]
assert result.iloc[0] == data[0]
assert result.iloc[1] == data[1]
assert result.iloc[2] == data[3]
def test_take(self, data, na_value, na_cmp):
result = data.take([0, -1])
assert result.dtype == data.dtype
assert result[0] == data[0]
assert result[1] == data[-1]
result = data.take([0, -1], allow_fill=True, fill_value=na_value)
assert result[0] == data[0]
assert na_cmp(result[1], na_value)
with tm.assert_raises_regex(IndexError, "out of bounds"):
data.take([len(data) + 1])
def test_take_empty(self, data, na_value, na_cmp):
empty = data[:0]
result = empty.take([-1], allow_fill=True)
assert na_cmp(result[0], na_value)
with pytest.raises(IndexError):
empty.take([-1])
with tm.assert_raises_regex(IndexError, "cannot do a non-empty take"):
empty.take([0, 1])
def test_take_negative(self, data):
# https://github.com/pandas-dev/pandas/issues/20640
n = len(data)
result = data.take([0, -n, n - 1, -1])
expected = data.take([0, 0, n - 1, n - 1])
self.assert_extension_array_equal(result, expected)
def test_take_non_na_fill_value(self, data_missing):
fill_value = data_missing[1] # valid
na = data_missing[0]
array = data_missing._from_sequence([na, fill_value, na])
result = array.take([-1, 1], fill_value=fill_value, allow_fill=True)
expected = array.take([1, 1])
self.assert_extension_array_equal(result, expected)
def test_take_pandas_style_negative_raises(self, data, na_value):
with pytest.raises(ValueError):
data.take([0, -2], fill_value=na_value, allow_fill=True)
@pytest.mark.parametrize('allow_fill', [True, False])
def test_take_out_of_bounds_raises(self, data, allow_fill):
arr = data[:3]
with pytest.raises(IndexError):
arr.take(np.asarray([0, 3]), allow_fill=allow_fill)
def test_take_series(self, data):
s = pd.Series(data)
result = s.take([0, -1])
expected = pd.Series(
data._from_sequence([data[0], data[len(data) - 1]], dtype=s.dtype),
index=[0, len(data) - 1])
self.assert_series_equal(result, expected)
def test_reindex(self, data, na_value):
s = pd.Series(data)
result = s.reindex([0, 1, 3])
expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3])
self.assert_series_equal(result, expected)
n = len(data)
result = s.reindex([-1, 0, n])
expected = pd.Series(
data._from_sequence([na_value, data[0], na_value],
dtype=s.dtype),
index=[-1, 0, n])
self.assert_series_equal(result, expected)
result = s.reindex([n, n + 1])
expected = pd.Series(data._from_sequence([na_value, na_value],
dtype=s.dtype),
index=[n, n + 1])
self.assert_series_equal(result, expected)
def test_reindex_non_na_fill_value(self, data_missing):
valid = data_missing[1]
na = data_missing[0]
array = data_missing._from_sequence([na, valid])
ser = pd.Series(array)
result = ser.reindex([0, 1, 2], fill_value=valid)
expected = pd.Series(data_missing._from_sequence([na, valid, valid]))
self.assert_series_equal(result, expected)
|
|
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# util.py
# Created by Disa Mhembere on 2013-02-25.
# Copyright (c) 2013. All rights reserved.
# All helper methods for MROCP views.py
import os
import sys
import zipfile
import tempfile
import re
from random import randint
from django.conf import settings
from django.core.mail import send_mail
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
def makeDirIfNone(dirPathList):
'''
  Create the directories specified in dirPathList. Failure is usually due to permission issues.
  @param dirPathList: a list of the full paths of the directory(ies) to be created
'''
for dirPath in dirPathList:
try:
if not (os.path.exists(dirPath)):
os.makedirs(dirPath)
print "%s directory made successfully" % dirPath
else:
print "%s directory already exists" % dirPath
except:
print "[ERROR] while attempting to create %s" % dirPath
sys.exit(-1)
################################################################################
def getFiberPath(fiberFileName):
'''
  Return fiberFileName's full path less the '_fiber.dat' portion.
@param fiberFileName - is a tract file name with naming convention '[filename]_fiber.dat'
where filename may vary but _fiber.dat may not.
'''
return fiberFileName.partition('_')[0]
################################################################################
def defDataDirs(projectDir):
'''
Define all the paths to the data product directories
@param projectDir: the fully qualified path of the project directory
'''
derivatives = os.path.join(projectDir, 'derivatives')
#rawdata = os.path.join(projectDir, 'rawdata')
graphs = os.path.join(projectDir, 'graphs')
#graphInvariants = os.path.join(projectDir, 'graphInvariants')
#images = os.path.join(projectDir, 'images')
return [derivatives, graphs]
################################################################################
def getFiberID(fiberfn):
'''
Assumptions about the data made here as far as file naming conventions
@param fiberfn: the dMRI streamline file in format {filename}_fiber.dat
'''
if fiberfn.endswith('/'):
fiberfn = fiberfn[:-1] # get rid of trailing slash
if re.match(re.compile(r'.+_fiber$'), os.path.splitext(fiberfn.split('/')[-1])[0]):
return(os.path.splitext(fiberfn.split('/')[-1])[0]).split('_')[0] + '_'
else:
return os.path.splitext(fiberfn.split('/')[-1])[0] + '_'
################################################################################
def writeBodyToDisk(data, saveDir):
'''
  Write the request's body to disk
@param data: the data to be written to file
@param saveDir: the location of where data is to be written
  @return: a list with the paths of the uploaded files
'''
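  # Note: the request body is spooled to a named temporary file (in the
  # hard-coded /data/pytmp scratch directory) and re-opened as a zip archive;
  # each member is flattened to its basename before being extracted into
  # saveDir.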
tmpfile = tempfile.NamedTemporaryFile(dir="/data/pytmp")
tmpfile.write(data)
tmpfile.flush()
tmpfile.seek(0)
rzfile = zipfile.ZipFile(tmpfile.name, "r", allowZip64=True)
print 'Temporary file created...'
''' Extract & save zipped files '''
uploadFiles = []
for name in (rzfile.namelist()):
try:
bname = os.path.basename(name)
# strip name of source folders if in file name
outfile = open(os.path.join(saveDir, bname), 'wb')
outfile.write(rzfile.read(name))
outfile.flush()
outfile.close()
uploadFiles.append(os.path.join(saveDir, bname)) # add to list of files
print bname + " written to disk.."
except Exception:
print "\n[WARNING]: Item %s rejected for file download ...\n" % name
return uploadFiles
################################################################################
def adaptProjNameIfReq(projPath):
'''
  If the directory already exists, pick a name close to the requested one,
  just as file systems do with identically named files in a directory.
'''
if not os.path.exists(projPath):
return projPath
else:
projbase = projPath[:-(len(projPath.split('/')[-1]))]
scanID = projPath.split('/')[-1]
while (os.path.exists(projPath)):
if not (re.match(re.compile('.*_\d+$'), scanID)):
return os.path.join(projbase, scanID+'_1')
else:
return os.path.join(projbase, addOneToDirNum(scanID))
################################################################################
def addOneToDirNum(dirname):
'''
  Used for file uploads where a directory name collides with an existing one.
  Adds one to the trailing number of a directory whose name matches the
  regex r'.*_\d+$'.
'''
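  # Example: addOneToDirNum("scan_3") -> "scan_4" (the caller is expected to
  # have verified that the name already ends in '_<number>').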
idx = -1
char = dirname[idx]
while(char != '_'):
idx -= 1
char = dirname[idx]
return dirname[:idx+1] + str(int(dirname[idx + 1:]) + 1)
################################################################################
def saveFileToDisk(fileData, fullFileName):
'''
  @param fileData: the file data from a form that is to be saved
@param fullFileName: the fully qualified file name i.e where it should be stored
and what it should be named
'''
if not os.path.exists(os.path.dirname(fullFileName)):
os.makedirs(os.path.dirname(fullFileName))
print "Making directory: %s ..." % os.path.dirname(fullFileName)
with open(fullFileName, 'wb+') as destination:
#destination = open(fullFileName, 'wb+') # Consider try: except for this
for chunk in fileData.chunks():
destination.write(chunk)
#destination.close()
print "Saving file: %s " % fullFileName
################################################################################
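# Note: to_html() applies str.format() twice. The first call drops the
# newline-converted message body into the HTML template; the second call fills
# a "{}" placeholder that the caller leaves inside the body with an anchor tag
# pointing at `link`. A minimal sketch, assuming a hypothetical message that
# contains such a placeholder:
#
#   to_html("Job finished.\nDownload from {}", "http://example.org/result")
#   # body renders as: Job finished.<br>Download from
#   #                  <a href="http://example.org/result">this link</a>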
def to_html(string, link):
return """
<html>
<head></head>
<body>
<p>
{}
</p>
</body>
</html>
""".format(string.replace("\n", "<br>")).\
format("<a href=\"{}\">this link</a>".format(link))
def sendJobBeginEmail(email_addr, invariants, genGraph=True):
msg = "Hello,\n\nThe following actions were requested using %s:\n" % email_addr
if genGraph:
msg += "- Generate graph\n"+" "*randint(0,10)
for inv in invariants:
msg += "- Compute " + settings.VALID_FILE_TYPES[inv] + "\n"
msg += "\nYou will receive another email when your job completes."
msg += "\n\nThanks for using the service,\nThe MROCP team"
send_mail("Graph job request",
msg, settings.SERVER_EMAIL, [email_addr], fail_silently=False)
def sendJobFailureEmail(email_addr, txt, data_loc=""):
txt += "Thanks for using the service,\nThe MROCP team"
msg = txt.format(data_loc)
html = to_html(txt, data_loc)
send_mail("Graph job FAILURE!",
msg, settings.SERVER_EMAIL, [email_addr], fail_silently=False, html_message=html)
def sendJobCompleteEmail(email_addr, data_loc):
txt = "Congratulations,\n\nThe job you requested is complete and "\
"available for download at {}.\n\nThanks for using the service,"\
"\nThe MROCP team"
msg = txt.format(data_loc)
html = to_html(txt, data_loc)
send_mail("Graph job COMPLETE!",
msg, settings.SERVER_EMAIL, [email_addr], fail_silently=False, html_message=html)
def sendEmail(email_addr, title, msg):
msg += "Thanks for using the service,\nThe MROCP team"
send_mail(title,
msg, settings.SERVER_EMAIL, [email_addr], fail_silently=False)
#####################################################################################
def get_genus(fn):
"""
Get the genus given a file path
  @param fn: a file path that contains the genus directory name
"""
sep = "/"
genera = os.listdir(settings.GRAPH_DIR)
for idx, name in enumerate(fn.split(sep)):
if name in genera:
return name
print "No genus found!"
return "" # Unknown
#####################################################################################
def get_script_prefix():
from django.core.urlresolvers import get_script_prefix as gsp
from ocpipeline.settings import URL_BASE
return gsp() + URL_BASE
#####################################################################################
def get_download_path(fspath):
term_char = ""
if not fspath.endswith("/"):
if os.path.isdir(fspath): term_char = "/"
#return "http://awesome.cs.jhu.edu/" + \
# ("/".join(fspath.split("/")[4:])).replace(' ','%20') + term_char
return "http://openconnecto.me/mr" + \
("/".join(fspath.split("/")[4:])).replace(' ','%20') + term_char
#####################################################################################
# Simple email address test
def check_email(email):
patt = re.compile("[^@]+@[^@]+\.[^@]+")
if (re.match(patt, email)):
return True
return False
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts MNIST data to TFRecords of TF-Example protos.
This module downloads the MNIST data, uncompresses it, reads the files
that make up the MNIST data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contains a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import numpy as np
from six.moves import urllib
import tensorflow as tf
from datasets import dataset_utils
# The URLs where the MNIST data can be downloaded.
_DATA_URL = 'http://yann.lecun.com/exdb/mnist/'
_TRAIN_DATA_FILENAME = 'train-images-idx3-ubyte.gz'
_TRAIN_LABELS_FILENAME = 'train-labels-idx1-ubyte.gz'
_TEST_DATA_FILENAME = 't10k-images-idx3-ubyte.gz'
_TEST_LABELS_FILENAME = 't10k-labels-idx1-ubyte.gz'
_IMAGE_SIZE = 28
_NUM_CHANNELS = 1
# The names of the classes.
_CLASS_NAMES = [
b'zero',
b'one',
b'two',
b'three',
b'four',
b'five',
    b'six',
b'seven',
b'eight',
b'nine',
]
def _extract_images(filename, num_images):
"""Extract the images into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
"""
print('Extracting images from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(
_IMAGE_SIZE * _IMAGE_SIZE * num_images * _NUM_CHANNELS)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
return data
def _extract_labels(filename, num_labels):
"""Extract the labels into a vector of int64 label IDs.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A numpy array of shape [number_of_labels]
"""
print('Extracting labels from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def _add_to_tfrecord(data_filename, labels_filename, num_images,
tfrecord_writer):
"""Loads data from the binary MNIST files and writes files to a TFRecord.
Args:
data_filename: The filename of the MNIST images.
labels_filename: The filename of the MNIST labels.
num_images: The number of images in the dataset.
tfrecord_writer: The TFRecord writer to use for writing.
"""
images = _extract_images(data_filename, num_images)
labels = _extract_labels(labels_filename, num_images)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(num_images):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, num_images))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(
png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j],
_CLASS_NAMES[labels[j]], channels=1)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The directory where the temporary files are stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/%s-mnist.tfrecord' % (dataset_dir, split_name)
def _download_dataset(dataset_dir):
"""Downloads MNIST locally.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
print('Downloading file %s...' % filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %.1f%%' % (
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(_DATA_URL + filename,
filepath,
_progress)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
_download_dataset(dataset_dir)
# First, process the training data:
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TRAIN_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TRAIN_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 60000, tfrecord_writer)
# Next, process the testing data:
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TEST_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TEST_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 10000, tfrecord_writer)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the MNIST dataset!')
|
|
import numpy as np
'''
Homework 1
Problem 10
Problem type: 3 - parameter estimation
'''
class HMM:
"""
Steps:
Compute alpha[0] : 1XN
Scale alpha[0]
Compute alpha[1:T-1] : TXN
Scale alpha[1:T-1]
Compute beta[T-1] : 1XN
Compute scaled beta[T-2:0] : TXN
for each t:
Compute denom : 1X1
Compute digamma : NXN
Compute gamma : 1XN
digamma: TXNXN
gamma: TXN
Re-estimate pi: 1XN
Re-estimate A: NXN
Re-estimate B: MXN
Compute logProb
Iteration logic
End
"""
def __init__(self, minIters, threshold, lambdaProvider):
self.minIters = minIters
self.threshold = threshold
self.iters = 0
self.oldLogProb = -np.inf
self.A = lambdaProvider.A
self.B = lambdaProvider.B
self.pi = lambdaProvider.pi
'''
    Assuming obsSeq[] has T observations, each a symbol in 0...M-1
'''
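    # alphaPass computes the scaled forward probabilities: each row alpha[t]
    # is normalised by c[t] = 1 / sum_i alpha[t][i] to avoid numerical
    # underflow on long observation sequences. The scale factors c are reused
    # by betaPass() and by logProb(), where the sequence log-likelihood is
    # recovered as log P(O | lambda) = -sum_t log(c[t]).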
def alphaPass(self, obsSeq):
A = self.A
B = self.B
pi = self.pi
T = len(obsSeq)
N = np.shape(A)[0]
o0 = int(obsSeq[0])
alpha = np.empty((T, N))
c = np.empty(T)
alpha[0] = np.multiply(pi, np.transpose(B[:, o0])) #1XN
c[0] = alpha[0].sum(axis=0)
c[0] = 1 / c[0]
# print(c[0])
alpha[0] = alpha[0] * c[0]
# alpha = np.transpose(alpha) #column matrix
for t in range(1, len(obsSeq)):
o = int(obsSeq[t])
alphaTmp = A * alpha[t-1][:, None]
alpha[t] = alphaTmp.sum(axis = 0)
alpha[t] = np.multiply(alpha[t], B[:, o])
c[t] = alpha[t].sum(axis = 0)
c[t] = 1 / c[t]
alpha[t] = alpha[t] * c[t]
return (alpha, c)
def betaPass(self, alpha, c, obsSeq):
A = self.A
B = self.B
pi = self.pi
T = len(obsSeq)
N = np.shape(A)[0]
beta = np.empty((T, N))
beta[T-1] = np.ones(N) * c[T-1]
t = T-2
while t>=0:
o = int(obsSeq[t+1])
bCol = np.transpose(B[:,o])
betaRow = beta[t+1]
betaTmp = A * bCol[:,None] * betaRow[:,None]
beta[t] = betaTmp.sum(axis=0)
beta[t] = beta[t] * c[t]
t = t - 1
return beta
def gammaPass(self, alpha, beta, obsSeq):
A = self.A
B = self.B
pi = self.pi
T = len(obsSeq)
N = np.shape(A)[0]
denom = np.empty(T)
gamma = np.empty([T,N])
digamma = np.empty([T,N,N])
        for t in range(0, T-1):
o = int(obsSeq[t+1])
bCol = np.transpose(B[:,o])
betaRow = beta[t+1]
alphaRow = alpha[t]
denomTmp = A * alphaRow[:,None] * bCol[:,None] * betaRow[:,None]
denom[t] = denomTmp.sum(axis=0).sum()
digamma[t] = denomTmp / denom[t]
gamma[t] = digamma[t].sum(axis=1)
denom = alpha[T-1].sum()
gamma[T-1] = alpha[T-1] / denom
return(gamma, digamma)
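    # reestimate applies (a variant of) the standard Baum-Welch updates:
    #   pi_i   = gamma[0][i]
    #   a_ij   = sum_t digamma[t][i][j] / sum_t gamma[t][i]
    #   b_j(k) = sum_{t : O_t = k} gamma[t][j] / sum_t gamma[t][j]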
def reestimate(self, gamma, digamma, obsSeq):
newPi = gamma[0]
B = self.B
M = np.shape(B)[1]
N = np.shape(B)[0]
T = len(obsSeq)
        digammaSumAcrossT = digamma[0:T-1].sum(axis=0)
        gammaSumAcrossT = gamma[0:T-1].sum(axis=0)
newA = digammaSumAcrossT / gammaSumAcrossT
newB = np.empty([N,M])
        for i in range(0, N):
for m in range(0,M):
numer = 0
denom = 0
for t in range(0, T-1):
o = int(obsSeq[t])
denom = denom + gamma[t,i]
if m == o:
numer = numer + gamma[t,i]
newB[i,m] = numer/denom
return (newPi, newA, newB)
def logProb(self, c):
logC = np.log(c)
logProb = logC.sum()
return -logProb
def checkPerf(self, logProb):
self.iters = self.iters + 1
delta = abs(logProb - self.oldLogProb)
if(self.iters < self.minIters or delta > self.threshold):
print("Iter: %s, minIters: %s, delta:%f, thresh:%f" % (self.iters, self.minIters, delta, self.threshold) )
self.oldLogProb = logProb
return True
else:
return False
def executeType3(self, obsSeq):
doIterate = True
# print(obsSeq)
while(doIterate == True):
(alpha, c) = self.alphaPass(obsSeq)
# print("ALPHA: " + str(np.shape(alpha)))
# print("C" + str(np.shape(c)))
# print("ALPHA: " + str(alpha))
# print("C" + str(c))
beta = self.betaPass(alpha, c, obsSeq)
# print("BETA: " + str(np.shape(beta)))
# print("BETA: " + str(beta))
(gamma, digamma) = self.gammaPass(alpha, beta, obsSeq)
# print("GAMMA" + str(np.shape(gamma)))
# print("DIGAMMA" + str(np.shape(digamma)))
(newPi, newA, newB) = self.reestimate(gamma, digamma, obsSeq)
# print("newPi: " + str(np.shape(newPi)))
# print("newA: " + str(np.shape(newA)))
# print("newB: " + str(np.shape(newB)))
logProb = self.logProb(c)
doIterate = self.checkPerf(logProb)
self.A = newA
self.B = newB
self.pi = newPi
# print(newA)
print("Iteration#%d: logProb=%f" % (self.iters, logProb))
# break
class BrownCorpus:
def convert(self, w):
ret = []
for c in w:
if c == ' ':
ret.append('26')
else:
ret.append(str(ord(c) - ord('a')))
return ret
def revconvert(self, arr):
ret = []
for a in arr:
c = chr(a + ord('a'))
ret.append(c)
return ret
def obsSeq(self):
from nltk.corpus import brown
wl = brown.words()[0:2000]
ret = []
for w in wl:
w = w.lower()
if(w.isalpha()):
ret = ret + self.convert(w)
ret = ret + self.convert(" ")
return ret
class LambdaBuilder:
def __init__(self, N, M):
self.N = N
self.M = M
@property
def A(self):
ret = np.ones([self.N, self.N])
ret = ret / self.N
# ret = [[0.47, 0.53],[0.51, 0.49]]
# print("A= " + str(ret))
return ret
@property
def B(self):
ret = np.ones([self.N, self.M])
ret = ret / self.M
# print("B= " + str(ret))
return ret
@property
def pi(self):
ret = np.ones([1, self.N])
ret = ret / self.N
# pi = [0.51, 0.49]
# print("PI= " + str(ret))
return ret
def main():
obsSeq = BrownCorpus().obsSeq()
lambdaBuilder = LambdaBuilder(2, 27)
hmm = HMM(100, 10000, lambdaBuilder)
hmm.executeType3(obsSeq)
print(hmm.A)
print(hmm.B)
if __name__ == "__main__":
main()
|
|
"""
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .init import *
class Profile(object):
"""
Class containing methods for retrieving
submissions of user
"""
site_name = "HackerRank"
# -------------------------------------------------------------------------
def __init__(self, handle=""):
"""
@param handle (String): HackerRank handle
"""
self.site = Profile.site_name
self.handle = handle
# -------------------------------------------------------------------------
@staticmethod
def is_valid_url(url):
        return "hackerrank.com/" in url
# --------------------------------------------------------------------------
@staticmethod
def is_website_down():
"""
@return (Boolean): If the website is down
"""
return (Profile.site_name in current.REDIS_CLIENT.smembers("disabled_retrieval"))
# -------------------------------------------------------------------------
@staticmethod
def get_tags(response):
"""
@param response(Dict): Response json from the API
@return (List): List of tags
"""
all_tags = []
model = response["model"]
track = model["track"]
primary_contest = model["primary_contest"]
if track:
# If the problem is a practice problem
all_tags = [track["name"]]
elif primary_contest:
if primary_contest["track"]:
# If the problem is a contest problem with track
all_tags = [primary_contest["track"]["name"]]
elif primary_contest["name"]:
# If the problem is a contest problem without track
# Then consider contest name as tag
all_tags = [primary_contest["name"]]
return all_tags
# -------------------------------------------------------------------------
@staticmethod
def get_editorial_link(response, problem_link):
"""
@param response(Dict): Response json from the API
@param problem_link(String): Problem link
@return (String/None): Editorial link
"""
editorial_present = response["model"]["is_editorial_available"]
editorial_link = problem_link + "/editorial/" if editorial_present else None
return editorial_link
# -------------------------------------------------------------------------
@staticmethod
def get_problem_setters(response):
"""
@param response(Dict): Response json from the API
@return (List/None): Problem authors or None
"""
author = utilities.get_key_from_dict(response["model"],
"author_name",
None)
return None if author is None else [author]
# -------------------------------------------------------------------------
@staticmethod
def get_problem_details(**args):
"""
Get problem_details given a problem link
@param args (Dict): Dict containing problem_link
@return (Dict): Details of the problem returned in a dictionary
"""
all_tags = []
editorial_link = None
problem_link = args["problem_link"]
problem_setters = None
        if "contests" in problem_link:
rest_url = problem_link.replace("contests/",
"rest/contests/")
else:
rest_url = problem_link.replace("challenges/",
"rest/contests/master/challenges/")
response = get_request(rest_url)
if response in REQUEST_FAILURES:
return dict(tags=all_tags,
editorial_link=editorial_link,
problem_setters=problem_setters)
response = response.json()
return dict(tags=Profile.get_tags(response),
editorial_link=Profile.get_editorial_link(response, problem_link),
problem_setters=Profile.get_problem_setters(response))
# -------------------------------------------------------------------------
@staticmethod
def is_invalid_handle(handle):
url = "https://www.hackerrank.com/rest/hackers/" + \
handle + \
"/recent_challenges?offset=0&limit=2"
response = get_request(url)
if response in REQUEST_FAILURES:
return True
return False
# -------------------------------------------------------------------------
@staticmethod
def rating_graph_data(handle):
website = "https://www.hackerrank.com/"
url = "%srest/hackers/%s/rating_histories_elo" % (website, handle)
response = get_request(url)
if response in REQUEST_FAILURES:
return response
response = response.json()["models"]
hackerrank_graphs = []
for contest_class in response:
final_json = {}
for contest in contest_class["events"]:
time_stamp = contest["date"][:-5].split("T")
time_stamp = datetime.datetime.strptime(time_stamp[0] + " " + time_stamp[1],
"%Y-%m-%d %H:%M:%S")
# Convert UTC to IST
time_stamp += datetime.timedelta(hours=5, minutes=30)
time_stamp = str(time_stamp)
final_json[time_stamp] = {"name": contest["contest_name"],
"url": website + contest["contest_slug"],
"rating": str(contest["rating"]),
"rank": contest["rank"]}
graph_name = "HackerRank - %s" % contest_class["category"]
hackerrank_graphs.append({"title": graph_name,
"data": final_json})
return hackerrank_graphs
# -------------------------------------------------------------------------
def get_submissions(self, last_retrieved, is_daily_retrieval):
"""
Retrieve HackerRank submissions after last retrieved timestamp
@param last_retrieved (DateTime): Last retrieved timestamp for the user
@param is_daily_retrieval (Boolean): If this call is from daily retrieval cron
@return (Dict): Dictionary of submissions containing all the
information about the submissions
"""
handle = self.handle
url = "https://www.hackerrank.com/rest/hackers/" + \
handle + \
"/recent_challenges"
request_params = {"limit": "5", "response_version": "v2"}
submissions = []
next_cursor = "null"
for i in xrange(1000):
request_params["cursor"] = next_cursor
response = get_request(url,
params=request_params,
is_daily_retrieval=is_daily_retrieval)
if response in REQUEST_FAILURES:
return response
next_cursor = response.json()["cursor"]
for row in response.json()["models"]:
# Time of submission
# @Todo: This is ugly
time_stamp = row["created_at"][:-10].split("T")
time_stamp = time.strptime(time_stamp[0] + " " + time_stamp[1],
"%Y-%m-%d %H:%M:%S")
time_stamp = datetime.datetime(time_stamp.tm_year,
time_stamp.tm_mon,
time_stamp.tm_mday,
time_stamp.tm_hour,
time_stamp.tm_min,
time_stamp.tm_sec) + \
datetime.timedelta(minutes=330)
curr = time.strptime(str(time_stamp), "%Y-%m-%d %H:%M:%S")
if curr <= last_retrieved:
return submissions
submissions.append((str(time_stamp),
"https://www.hackerrank.com" + row["url"],
row["name"],
"AC",
"100",
"-",
""))
if response.json()["last_page"] == True:
break
return submissions
# =============================================================================
|
|
#!/usr/bin/env python3
import json
import logging
import os
logging.basicConfig(level=logging.DEBUG)
import requests
PUSH_URL = "https://{}/api/dashboard/demo/tile".format(
os.environ.get('DASHBOARD_IP_PORT', 'localhost:8000')
)
PUSH_URL += "/{tile_id}"
DEMO_ADDRESS = "https://docs.d45hb04rd.space"
tiles_data = {}
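# Each entry below maps a tile id to the JSON payload pushed to the dashboard's
# /api/dashboard/demo/tile/<tile_id> endpoint (see the POST loop at the bottom).
# Chart tiles embed a Chart.js-style config under "tile-data": a "type", an
# "options" dict and a "data" object parsed from the JSON literals below.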
json_data = """{
"labels":[
"January",
"February",
"March",
"April",
"May",
"June",
"July"
],
"datasets":[
{
"label":"My First dataset",
"fill":false,
"lineTension":0.1,
"backgroundColor":"rgba(75,192,192,0.4)",
"borderColor":"rgba(75,192,192,1)",
"borderCapStyle":"butt",
"borderDash":[
],
"borderDashOffset":0,
"borderJoinStyle":"miter",
"pointBorderColor":"rgba(75,192,192,1)",
"pointBackgroundColor":"#fff",
"pointBorderWidth":1,
"pointHoverRadius":5,
"pointHoverBackgroundColor":"rgba(75,192,192,1)",
"pointHoverBorderColor":"rgba(220,220,220,1)",
"pointHoverBorderWidth":2,
"pointRadius":1,
"pointHitRadius":10,
"data":[
65,
59,
80,
81,
56,
55,
40
],
"spanGaps":false
}
]
}"""
tiles_data['tile-chart1'] = tiles_data['tile-chart-line'] = {
'tile-data': {
"header": "Line chart!!",
"type": "line",
"options": {},
"data": json.loads(json_data),
}
}
json_data = """{
"labels":[
"January",
"February",
"March",
"April",
"May",
"June",
"July"
],
"datasets":[
{
"label":"My First dataset",
"backgroundColor":[
"rgba(255, 99, 132, 0.2)",
"rgba(54, 162, 235, 0.2)",
"rgba(255, 206, 86, 0.2)",
"rgba(75, 192, 192, 0.2)",
"rgba(153, 102, 255, 0.2)",
"rgba(255, 159, 64, 0.2)"
],
"borderColor":[
"rgba(255,99,132,1)",
"rgba(54, 162, 235, 1)",
"rgba(255, 206, 86, 1)",
"rgba(75, 192, 192, 1)",
"rgba(153, 102, 255, 1)",
"rgba(255, 159, 64, 1)"
],
"borderWidth":1,
"data":[
65,
59,
80,
81,
56,
55,
40
]
}
]
}"""
tiles_data['tile-chart2'] = tiles_data['tile-chart-bar'] = {
'tile-data': {
"header": "Bar chart!!",
"type": "bar",
"options": {},
"data": json.loads(json_data),
}
}
json_data = """{
"labels":[
"Eating",
"Drinking",
"Sleeping",
"Designing",
"Coding",
"Cycling",
"Running"
],
"datasets":[
{
"label":"My First dataset",
"backgroundColor":"rgba(179,181,198,0.2)",
"borderColor":"rgba(179,181,198,1)",
"pointBackgroundColor":"rgba(179,181,198,1)",
"pointBorderColor":"#fff",
"pointHoverBackgroundColor":"#fff",
"pointHoverBorderColor":"rgba(179,181,198,1)",
"data":[
65,
59,
90,
81,
56,
55,
40
]
},
{
"label":"My Second dataset",
"backgroundColor":"rgba(255,99,132,0.2)",
"borderColor":"rgba(255,99,132,1)",
"pointBackgroundColor":"rgba(255,99,132,1)",
"pointBorderColor":"#fff",
"pointHoverBackgroundColor":"#fff",
"pointHoverBorderColor":"rgba(255,99,132,1)",
"data":[
28,
48,
40,
19,
96,
27,
100
]
}
]
}"""
tiles_data["tile-chart-radar"] = {
"tile-data": {
"header": "Radar tile!!",
"type": "radar",
"options": {},
"data": json.loads(json_data),
}
}
json_data = """{
"datasets":[
{
"data":[
11,
16,
7,
3,
14
],
"backgroundColor":[
"#FF6384",
"#4BC0C0",
"#FFCE56",
"#E7E9ED",
"#36A2EB"
],
"label":"My dataset"
}
],
"labels":[
"Red",
"Green",
"Yellow",
"Grey",
"Blue"
]
}"""
tiles_data['tile-chart-polar'] = {
'tile-data': {
"header": "Polar chart!!",
"type": "polarArea",
"options": {},
"data": json.loads(json_data),
}
}
json_data = """{
"labels":[
"Red",
"Blue",
"Yellow"
],
"datasets":[
{
"data":[
300,
50,
100
],
"backgroundColor":[
"#FF6384",
"#36A2EB",
"#FFCE56"
],
"hoverBackgroundColor":[
"#FF6384",
"#36A2EB",
"#FFCE56"
]
}
]
}"""
tiles_data['tile-chart3'] = tiles_data['tile-chart-pie'] = {
'tile-data': {
"header": "Pie chart!!",
"type": "pie",
"options": {},
"data": json.loads(json_data),
}
}
tiles_data['tile-chart-doughnut'] = {
'tile-data': {
"header": "Doughnut chart!!",
"type": "doughnut",
"options": {},
"data": json.loads(json_data),
}
}
json_data = """{
"datasets":[
{
"label":"First Dataset",
"data":[
{
"x":20,
"y":30,
"r":15
},
{
"x":40,
"y":10,
"r":10
}
],
"backgroundColor":"#FF6384",
"hoverBackgroundColor":"#FF6384"
}
]
}"""
tiles_data['tile-chart-bubble'] = {
'tile-data': {
"header": "Bubble chart!!",
"type": "bubble",
"options": {},
"data": json.loads(json_data),
}
}
tiles_data["tile-image1"] = {
"tile-data": {
"header": "Rust logo",
"header": "rust",
"imageSrc": "https://www.rust-lang.org/logos/rust-logo-256x256.png",
},
}
tiles_data['tile-image2'] = {
'tile-data': {
"header": "Polymer logo",
"imageSrc": "https://www.polymer-project.org/images/logos/p-logo.png",
}
}
tiles_data['tile-image3'] = {
'tile-data': {
"header": "vf",
"imageSrc": "https://media.giphy.com/media/D8UYWEpnUpJ4Y/giphy.gif",
}
}
tiles_data['tile-markdown1'] = tiles_data['tile-markdown-simple'] = {
'tile-data': {
"markdown": "`Markdown` is totally _awesome_!"
}
}
tiles_data['tile-markdown2'] = tiles_data['tile-markdown-listing'] = {
'tile-data': {
"markdown": """# Header 1
## Header 2
### Header 3
#### Header 4 ####
##### Header 5 #####
1. Item
2. Item
* Mixed
* Mixed
3. Item
"""
}
}
tiles_data['tile-markdown3'] = {
'tile-data': {
"markdown": """# Tile docs
Each of links below contains demo (to see the demo click button in top-right corner after link clicked).
* [Tile Chart]({demo_address}/components/dashboard-toolkit/#tile-chart)
* [Tile Image]({demo_address}/components/dashboard-toolkit/#tile-image)
* [Tile Markdown]({demo_address}/components/dashboard-toolkit/#tile-markdown)
* [Tile Value]({demo_address}/components/dashboard-toolkit/#tile-value)
* [Other Dashboards]({demo_address}/components/dashboard-toolkit/#dashboard-ws)
""".format(demo_address=DEMO_ADDRESS)
}
}
print(tiles_data['tile-markdown3'])
tiles_data['tile-value1'] = {
'tile-data': {
"value": "100%"
}
}
tiles_data['tile-value2'] = {
'tile-data': {
"value": "50%",
"color": "yellow",
"backgroundColor": "#12B0C5",
}
}
client = requests.Session()
client.headers.update(
{'Authorization': os.environ.get('DASHBOARD_DASHBOARD_TOKEN', 'change-me')}
)
for tile_id, data in sorted(tiles_data.items()):
jsoned_data = json.dumps(data)
url = PUSH_URL.format(tile_id=tile_id)
response = client.post(url, data=jsoned_data)
print(tile_id, response.status_code)
if response.status_code != 201:
print(response.content)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Operations for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops import gen_tpu_ops
from tensorflow.python.ops.gen_tpu_ops import *
# pylint: enable=wildcard-import,unused-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_function
def _create_default_group_assignment():
num_shards = tpu_function.get_tpu_context().number_of_shards
if num_shards is None:
logging.warning(
"cross_replica_sum should be used within a tpu_shard_context, but "
"got unset number_of_shards. Assuming 1.")
num_shards = 1
group_assignment = [list(range(num_shards))]
return group_assignment
def all_to_all(x,
concat_dimension,
split_dimension,
split_count,
group_assignment=None,
name=None):
"""Exchange data across TPU replicas.
Args:
x: The local tensor.
concat_dimension: The dimension number to concatenate.
split_dimension: The dimension number to split.
    split_count: The number of splits; this must equal the sub-group size
      (group_assignment.get_shape()[1]).
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
A `Tensor` which is concatenated by data from different replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.all_to_all(
x,
group_assignment,
concat_dimension=concat_dimension,
split_dimension=split_dimension,
split_count=split_count,
name=name)
@ops.RegisterGradient("AllToAll")
def _all_to_all_grad(op, grad):
  # The gradient of an all-to-all is also an all-to-all, but with the
  # split_dimension and concat_dimension swapped.
  # The gradient with respect to group_assignment is None.
return [
gen_tpu_ops.all_to_all(
grad,
op.inputs[1],
concat_dimension=op.get_attr("split_dimension"),
split_dimension=op.get_attr("concat_dimension"),
split_count=op.get_attr("split_count")), None
]
def cross_replica_sum(x, group_assignment=None, name=None):
"""Sum the input tensor across replicas according to group_assignment.
Args:
x: The local tensor to the sum.
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
A `Tensor` which is summed across replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.cross_replica_sum(x, group_assignment, name=name)
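# A minimal usage sketch (hypothetical tensor and grouping), assuming four TPU
# replicas split into two groups of two; inside the replicated computation each
# replica passes its local `loss` tensor and receives the sum over its group:
#
#   summed_loss = cross_replica_sum(loss, group_assignment=[[0, 1], [2, 3]])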
def collective_permute(x, source_target_pairs, name=None):
"""Permute the input tensor across replicas given source_target_pairs.
For each source_target_pair <a, b>, we send replica a's input to replica b.
  Each replica id may appear only once in the source column and only once in
  the target column.
For the replica id not in the target column, this op returns a zero tensor
with the same shape and dtype of the input x.
For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
source_target_pairs=`[[0,1],[1,2],[2,3]]` gets the outputs:
`[0, A, B, C]`.
Args:
x: The local tensor to be permuted.
source_target_pairs: 2d int lists with shape [num_pairs, 2].
source_target_pairs[i][0] represents the source replica id and
source_target_pairs[i][1] represents the target replica id.
name: Optional op name.
Returns:
A `Tensor` which is permuted.
"""
return gen_tpu_ops.collective_permute(x, source_target_pairs, name=name)
@ops.RegisterGradient("CollectivePermute")
def _collective_permute_grad(op, grad):
# The gradient of a collective permute operation is also a collective
# permute, but with source/target pairs reversed. The gradient with respect
# to input argument `source_target_pairs` is `None`.
source_target_pairs = op.inputs[1][:, ::-1]
return [gen_tpu_ops.collective_permute(grad, source_target_pairs), None]
@ops.RegisterGradient("CrossReplicaSum")
def _cross_replica_sum_grad(op, grad):
# The gradient of a cross replica sum is also a cross-replica sum.
# The gradient with respect to group_assignment is None.
return [gen_tpu_ops.cross_replica_sum(grad, op.inputs[1]), None]
# This extra type checking exists to give a more helpful error message in
# the common case that uint8 and int64 values are infed. Remove when both
# types are supported.
_SUPPORTED_INFEED_DTYPES = set([
dtypes.bool, dtypes.int32, dtypes.int64, dtypes.bfloat16, dtypes.float32,
dtypes.complex64, dtypes.uint32
])
@ops.RegisterGradient("TPUEmbeddingActivations")
def _embedding_activations_grad(activations_op, grad_wrt_activations):
"""Saves the gradient of embedding activations ops in a graph collection."""
g = ops.get_default_graph()
table_id = activations_op.get_attr("table_id")
lookup_id = activations_op.get_attr("lookup_id")
table_gradients = g.get_collection_ref(
"tpu_embedding_gradients_table_%d" % table_id)
if not table_gradients:
raise RuntimeError(
"Gradients for TPUEmbedding have been generated in non-training mode."
"This is not expected. Consider putting your Optimizer.minimize code "
"behind the training mode condition check. For Estimator, you can "
"do \n\n"
" if mode == tf.estimator.ModeKeys.TRAIN:\n"
" train_op = opt.minimize(loss)\n"
"\n")
table_gradients[lookup_id] = array_ops.identity(grad_wrt_activations)
return [
# RegisterGradient requires that value be returned for all inputs. Since
# the first argument (tpu_gradient_variable_{table_name}) has shape [1],
# we will return zeros(shape=[1]). The actual gradient w.r.t. the
# embedding activations (grad_wrt_activations) has the same shape as the
# activations returned by embedding_activations.
array_ops.zeros(arg.shape, dtype=dtypes.float32)
for arg in activations_op.inputs
]
def infeed_dequeue(dtype, shape, name=None):
"""A placeholder op for a value that will be fed into the computation.
Args:
dtype: A `tf.DType`. The type of elements in the tensor.
shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
A tensor that will be provided using the infeed mechanism.
Raises:
TypeError: If 'dtype` is not a supported infeed type.
"""
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"{} is not a supported TPU infeed type. Supported types are: "
"{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue(dtype, shape, name=name)
# pylint: disable=redefined-outer-name
def infeed_dequeue_tuple(dtypes, shapes, name=None):
"""A placeholder op for values fed into the TPU simultaneously as a tuple.
Args:
dtypes: A list of `tf.DType`s that has length `>= 1`.
The element types of each element in `outputs`.
shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
The shapes of each tensor in `outputs`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
A list of tensors that will be provided using the infeed mechanism.
Raises:
TypeError: If a type in 'dtypes` is not a supported infeed type.
"""
for dtype in dtypes:
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"{} is not a supported TPU infeed type. Supported types are: "
"{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue_tuple(dtypes, shapes, name=name)
# pylint: enable=redefined-outer-name
# pylint: disable=protected-access
def send_tpu_embedding_gradients(inputs,
config,
learning_rates=None,
name=None):
"""A placeholder op for feeding per-sample gradients to the embedding layer.
Args:
inputs: A TensorList of gradients with which to update embedding tables.
This argument has the same length and shapes as the return value of
RecvTPUEmbeddingActivations, but contains gradients of the model's
loss with respect to the embedding activations. The embedding tables
are updated from these gradients via the optimizers specified in the
TPU embedding configuration given to tpu.initialize_system.
config: Serialized TPUEmbeddingConfiguration proto.
learning_rates: A TensorList of float32 scalars, one for each dynamic
learning rate tag: see the comments in
//third_party/tensorflow/core/protobuf/tpu/
optimization_parameters.proto.
Multiple tables can share the same dynamic learning rate tag as
specified in the configuration. If the learning rates for all tables
are constant, this list should be empty.
name: A name for the operation (optional).
Returns:
A SendTPUEmbeddingGradients operation.
"""
if learning_rates is None:
learning_rates = []
return gen_tpu_ops.send_tpu_embedding_gradients(
inputs=inputs, learning_rates=learning_rates, config=config, name=name)
send_tpu_embedding_gradients.__doc__ = (
gen_tpu_ops.send_tpu_embedding_gradients.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_integer_batch(batch,
device_ordinal,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
batch: A list of 1D tensors, one for each embedding table, containing the
indices into the tables.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingIntegerBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops.enqueue_tpu_embedding_integer_batch(
batch=batch,
device_ordinal=device_ordinal,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_integer_batch.__doc__ = (
gen_tpu_ops.enqueue_tpu_embedding_integer_batch.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_batch(sample_indices,
embedding_indices,
aggregation_weights,
device_ordinal,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 1 Tensors specifying the training example
and feature to which the corresponding embedding_indices and
aggregation_weights values belong. sample_indices[i] must equal b * nf +
f, where nf is the number of features from the corresponding table, f is
in [0, nf), and b is in [0, batch size). Both int32 and int64 are allowed,
and will be converted to int32 internally.
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. Both int32 and int64 are allowed and will be converted to int32
internally.
aggregation_weights: A list of rank 1 Tensors containing per sample --
i.e. per (training example, feature) -- aggregation weights. Both float32
and float64 are allowed and will be converted to float32 internally.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
combiners: A list of string scalars, one for each embedding table that
specify how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops.enqueue_tpu_embedding_sparse_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
device_ordinal=device_ordinal,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_batch.__doc__ = (
gen_tpu_ops.enqueue_tpu_embedding_sparse_batch.__doc__)
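# Hedged illustration (not part of the TensorFlow API above): how the flat
# sample_indices described in the docstring can be built with numpy for a
# hypothetical table with nf features per example and a batch of size
# batch_size, i.e. sample_indices[i] = b * nf + f.
def _example_sparse_batch_sample_indices(batch_size=2, nf=3):
  import numpy as np  # local import keeps the sketch self-contained
  b, f = np.meshgrid(np.arange(batch_size), np.arange(nf), indexing="ij")
  # For batch_size=2, nf=3 this yields [0, 1, 2, 3, 4, 5].
  return (b * nf + f).reshape(-1)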
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices,
embedding_indices,
aggregation_weights,
table_ids,
device_ordinal,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 1 Tensors specifying the training example
to which the corresponding embedding_indices and aggregation_weights
values belong. It corresponds to sp_ids.indices[:,0] in
embedding_lookup_sparse(). Both int32 and int64 are allowed and will be
converted to int32 internally.
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. It corresponds to sp_ids.values in embedding_lookup_sparse(). Both
int32 and int64 are allowed and will be converted to int32 internally.
aggregation_weights: A list of rank 1 Tensors containing per training
example aggregation weights. It corresponds to sp_weights.values in
embedding_lookup_sparse(). Both float32 and float64 are allowed and will
be converted to float32 internally.
table_ids: A list of integers specifying the identifier of the embedding
table (offset of TableDescriptor in the TPUEmbeddingConfiguration) to
lookup the corresponding input. The ith input is looked up using
table_ids[i]. The size of the table_ids list must be equal to that of
sample_indices, embedding_indices and aggregation_weights.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
combiners: A list of string scalars, one for each embedding table that
specify how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseTensorBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
table_ids=table_ids,
device_ordinal=device_ordinal,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_tensor_batch.__doc__ = (
gen_tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch.__doc__)
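# Hedged illustration (assumptions: a tf.SparseTensor `sp_ids` of embedding ids
# and a matching `sp_weights`, as in embedding_lookup_sparse): the docstring
# above maps its arguments onto SparseTensor fields roughly as follows.
#   sample_indices      <- [sp_ids.indices[:, 0]]
#   embedding_indices   <- [sp_ids.values]
#   aggregation_weights <- [sp_weights.values]
#   table_ids           <- [0]  # one entry per input, per the docstring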
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
is_py2 = sys.version_info[0] == 2
if is_py2:
  import Queue as queue
else:
  import queue
import threading
from resnet.utils import logger
from resnet.utils.batch_iter import IBatchIterator, BatchIterator
class BatchProducer(threading.Thread):
def __init__(self, q, batch_iter):
    super(BatchProducer, self).__init__()
self.q = q
self.batch_iter = batch_iter
self.log = logger.get()
    # Use a distinct name: Python 3's Thread.join() calls self._stop()
    # internally, so overriding _stop with an Event would break join().
    self._stop_event = threading.Event()
    self.daemon = True
  def stop(self):
    self._stop_event.set()
  def stopped(self):
    return self._stop_event.is_set()
def run(self):
while not self.stopped():
try:
b = self.batch_iter.next()
self.q.put(b)
except StopIteration:
self.q.put(None)
break
pass
pass
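# Hedged sketch (not part of the original module): the sentinel protocol used
# by BatchProducer. The producer pushes batches onto the queue and a single
# None once the wrapped iterator raises StopIteration; readers treat None as
# end-of-data, as ConcurrentBatchIterator.next() does below.
def _example_sentinel_protocol():
  class _ListIter(object):
    """Minimal stand-in for an IBatchIterator exposing next()."""
    def __init__(self, items):
      self._items = list(items)
    def next(self):
      if not self._items:
        raise StopIteration
      return self._items.pop(0)
    __next__ = next  # Python 3 compatibility for the sketch
  q = queue.Queue(maxsize=4)
  producer = BatchProducer(q, _ListIter([0, 1, 2]))
  producer.start()
  results = []
  while True:
    item = q.get()
    q.task_done()
    if item is None:
      break
    results.append(item)
  return results  # [0, 1, 2]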
class BatchConsumer(threading.Thread):
def __init__(self, q):
super(BatchConsumer, self).__init__()
self.q = q
self.daemon = True
    self._stop_event = threading.Event()
  def stop(self):
    self._stop_event.set()
  def stopped(self):
    return self._stop_event.is_set()
def run(self):
while not self.stopped():
try:
self.q.get(False)
self.q.task_done()
except queue.Empty:
pass
pass
pass
class ConcurrentBatchIterator(IBatchIterator):
def __init__(self,
batch_iter,
max_queue_size=10,
num_threads=5,
log_queue=20,
name=None):
"""
Data provider wrapper that supports concurrent data fetching.
"""
super(ConcurrentBatchIterator, self).__init__()
self.max_queue_size = max_queue_size
self.num_threads = num_threads
self.q = queue.Queue(maxsize=max_queue_size)
self.log = logger.get()
self.batch_iter = batch_iter
self.fetchers = []
self.init_fetchers()
self.counter = 0
self.relaunch = True
self._stopped = False
self.log_queue = log_queue
self.name = name
pass
def __len__(self):
return len(self.batch_iter)
def init_fetchers(self):
    for ii in range(self.num_threads):
f = BatchProducer(self.q, self.batch_iter)
f.start()
self.fetchers.append(f)
pass
def get_name(self):
if self.name is not None:
return "Queue \"{}\":".format(self.name)
else:
return ""
def info(self, message):
self.log.info("{} {}".format(self.get_name(), message), verbose=2)
def warning(self, message):
self.log.warning("{} {}".format(self.get_name(), message))
def scan(self, do_print=False):
dead = []
num_alive = 0
for ff in self.fetchers:
if not ff.is_alive():
dead.append(ff)
self.info("Found one dead thread.")
if self.relaunch:
self.info("Relaunch")
fnew = BatchProducer(self.q, self.batch_iter)
fnew.start()
self.fetchers.append(fnew)
else:
num_alive += 1
if do_print:
self.info("Number of alive threads: {}".format(num_alive))
s = self.q.qsize()
if s > self.max_queue_size / 3:
self.info("Data queue size: {}".format(s))
else:
self.warning("Data queue size: {}".format(s))
for dd in dead:
self.fetchers.remove(dd)
pass
def next(self):
if self._stopped:
raise StopIteration
self.scan(do_print=(self.counter % self.log_queue == 0))
if self.counter % self.log_queue == 0:
self.counter = 0
batch = self.q.get()
self.q.task_done()
self.counter += 1
while batch is None:
self.info("Got an empty batch. Ending iteration.")
self.relaunch = False
try:
batch = self.q.get(False)
self.q.task_done()
qempty = False
except queue.Empty:
qempty = True
pass
if qempty:
self.info("Queue empty. Scanning for alive thread.")
# Scan for alive thread.
found_alive = False
for ff in self.fetchers:
if ff.is_alive():
found_alive = True
break
self.info("No alive thread found. Joining.")
# If no alive thread, join all.
if not found_alive:
for ff in self.fetchers:
ff.join()
self._stopped = True
raise StopIteration
else:
self.info("Got another batch from the queue.")
return batch
def reset(self):
self.info("Resetting concurrent batch iter")
self.info("Stopping all workers")
for f in self.fetchers:
f.stop()
self.info("Cleaning queue")
cleaner = BatchConsumer(self.q)
cleaner.start()
for f in self.fetchers:
f.join()
self.q.join()
cleaner.stop()
self.info("Resetting index")
self.batch_iter.reset()
self.info("Restarting workers")
self.fetchers = []
self.init_fetchers()
self.relaunch = True
self._stopped = False
pass
pass
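# Hedged usage sketch (names like `base_iter`, `num_epochs` and `train_step`
# are illustrative): a typical epoch loop over the wrapper defined above.
#   it = ConcurrentBatchIterator(base_iter, max_queue_size=10, num_threads=4)
#   for epoch in range(num_epochs):
#     for batch in it:
#       train_step(batch)
#     it.reset()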
if __name__ == "__main__":
from batch_iter import BatchIterator
b = BatchIterator(100, batch_size=6, get_fn=None)
cb = ConcurrentBatchIterator(b, max_queue_size=5, num_threads=3)
for _batch in cb:
log = logger.get()
log.info(("Final out", _batch))
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enhancement factors."""
import itertools
import json
import jax.numpy as jnp
import numpy as onp
import sympy
from symbolic_functionals.syfes.symbolic import graphviz
from symbolic_functionals.syfes.symbolic import instructions
class EnhancementFactor:
"""Enhancement factor.
An enhancement factor is defined as a list of instructions. An enhancement
factor can be applied to a workspace, which is a dictionary {quantity_name:
quantity_value} of quantities. Quantities include features, parameters and
variables. Features are constant input 1D arrays defined on grids (e.g.
density, density gradient, etc.); parameters are scalar values subject to
optimization; variables are temporary intermediate quantities. A special
variable named 'enhancement_factor' denotes the resulting enhancement factor
values on grids.
"""
_isomorphic_copy_shared_parameter_prefix = 'c'
_isomorphic_copy_variable_prefix = 'v'
def __init__(self,
feature_names=None,
shared_parameter_names=None,
variable_names=None,
instruction_list=None):
"""Initializes an enhancement factor.
Args:
feature_names: List of strings, the names for features. Features
are 1D float numpy arrays.
shared_parameter_names: List of strings, the names for shared parameters.
Shared parameters are scalars that can be shared by multiple
instructions.
variable_names: List of strings, the names for variables. The variable
names should include an element called 'enhancement_factor', which will
be taken as the resulting enhancement factor after all instructions are
applied to a workspace.
instruction_list: List of instructions.Instruction instances, the sequence
of instructions that defines the enhancement factor.
"""
self.feature_names = feature_names or []
self.shared_parameter_names = shared_parameter_names or []
self.variable_names = variable_names or []
self.allowed_input_names = (
self.feature_names + self.shared_parameter_names + self.variable_names)
self.instruction_list = instruction_list or []
self.validate()
self.bound_parameter_names = list(set(itertools.chain(
*[instruction.get_bound_parameters()
for instruction in self.instruction_list])))
self.parameter_names = (
self.shared_parameter_names + self.bound_parameter_names)
shared_parameter_is_used = {
name: False for name in self.shared_parameter_names}
for instruction in self.instruction_list:
for arg in instruction.args:
if arg in shared_parameter_is_used:
shared_parameter_is_used[arg] = True
self.used_shared_parameter_names = [
name for name in self.shared_parameter_names
if shared_parameter_is_used[name]]
self.used_parameter_names = (
self.used_shared_parameter_names + self.bound_parameter_names)
def validate(self):
"""Validates names and instructions.
Raises:
TypeError: if instruction is not an instance of instructions.Instruction.
ValueError: if enhancement_factor not in variable names,
or repeated entries found in feature, parameter or variable names,
or instruction contains invalid input or output names.
"""
# validate instruction list
for instruction in self.instruction_list:
if not isinstance(instruction, instructions.Instruction):
raise TypeError(f'{instruction} is of type {type(instruction)}, not an '
'instance of instructions.Instruction')
# validate names
if 'enhancement_factor' not in self.variable_names:
raise ValueError('"enhancement_factor" not found in variable_names.')
if len(self.allowed_input_names) != len(set(self.allowed_input_names)):
raise ValueError('Repeated names found in input.')
# validates instruction arguments
for instruction in self.instruction_list:
if not isinstance(instruction, instructions.Instruction):
raise TypeError(f'{instruction} is of type {type(instruction)}, not an '
'instance of instructions.Instruction')
if instruction.output not in self.variable_names:
raise ValueError(f'Instruction {instruction} contains invalid output '
f'argument {instruction.output}')
for arg in instruction.inputs:
if arg not in self.allowed_input_names:
raise ValueError(f'Instruction {instruction} contains invalid input '
f'argument {arg}')
@property
def num_features(self):
"""Number of features."""
return len(self.feature_names)
@property
def num_shared_parameters(self):
"""Number of shared parameters."""
return len(self.shared_parameter_names)
@property
def num_bound_parameters(self):
"""Number of bound parameters."""
return len(self.bound_parameter_names)
@property
def num_parameters(self):
"""Number of parameters."""
return len(self.parameter_names)
@property
def num_used_parameters(self):
"""Number of used parameters."""
return len(self.used_parameter_names)
@property
def num_variables(self):
"""Number of variables including 'enhancement_factor'."""
return len(self.variable_names)
@property
def num_instructions(self):
"""Number of instructions."""
return len(self.instruction_list)
def eval(self, features, parameters, use_jax=True):
"""Evaluates the enhancement factor on grids.
Args:
features: Dict {feature_name: feature_value}, the input features.
parameters: Dict {parameter_name: parameter_values}, the scalar parameters
in the functional form.
use_jax: Boolean, if True, use jax.numpy for calculations, otherwise use
numpy.
Returns:
Float numpy array with shape (num_grids_all,), the enhancement factor
on grids.
"""
np = jnp if use_jax else onp
workspace = {
**features,
**{parameter_name: np.array(parameter_value)
for parameter_name, parameter_value in parameters.items()},
**{variable_name: np.array(0.)
for variable_name in self.variable_names}
}
for instruction in self.instruction_list:
instruction.apply(workspace, use_jax=use_jax)
return workspace['enhancement_factor']
def to_dict(self):
"""Converts the enhancement factor to dictionary.
Returns:
Dict, the dictionary representation of enhancement factor.
"""
return {
'feature_names': list(self.feature_names),
'shared_parameter_names': list(self.shared_parameter_names),
'variable_names': list(self.variable_names),
'instructions': [
instruction.to_list() for instruction in self.instruction_list]
}
@staticmethod
def from_dict(dictionary):
"""Loads enhancement factor from dictionary.
Args:
dictionary: Dict, the dictionary representation of enhancement factor.
Returns:
Instance of EnhancementFactor, the loaded enhancement factor.
"""
return EnhancementFactor(
feature_names=dictionary['feature_names'],
shared_parameter_names=dictionary['shared_parameter_names'],
variable_names=dictionary['variable_names'],
instruction_list=[
instructions.Instruction.from_list(lst)
for lst in dictionary['instructions']])
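  # Hedged usage note (illustration only): to_dict / from_dict can round-trip
  # through JSON, e.g.
  #   restored = EnhancementFactor.from_dict(
  #       json.loads(json.dumps(factor.to_dict())))
  #   assert restored == factor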
def make_isomorphic_copy(self,
feature_names=None,
num_shared_parameters=None,
num_variables=None,):
"""Makes an isomorphic copy of the EnhancementFactor instance.
    Here isomorphic copy denotes that the new EnhancementFactor instance will
    have an instruction list identical to that of the current instance, while
    all shared parameters and variables will be renamed based on class
    attributes
_isomorphic_copy_shared_parameter_prefix and
_isomorphic_copy_variable_prefix.
    The number of shared parameters and variables of the new instance can be
    specified, provided they are greater than or equal to those of the current
    instance. The feature names of the new instance can also be specified, as
    long as they constitute a superset of the feature names of the current
    instance.
Empty EnhancementFactors with desired features and number of shared
parameters and variables can be constructed by calling this method of the
f_empty object defined in this module.
Args:
feature_names: List of strings, if present, defines the feature names of
the copy. Must be a superset of self.feature_names.
num_shared_parameters: Integer, if present, specifies the number of
shared parameters of the copy. Must be greater or equal to
self.num_shared_parameters.
num_variables: Integer, if present, specifies the number of variables of
the copy. Must be greater or equal to self.num_variables.
Returns:
Instance of EnhancementFactor, the isomorphic copy.
Raises:
      ValueError: if feature_names contains repeated elements,
or if feature_names is not a superset of self.feature_names,
or num_shared_parameters or num_variables is smaller than those of
current instance.
"""
if feature_names is None:
feature_names = self.feature_names
else:
if len(feature_names) != len(set(feature_names)):
raise ValueError('Repeated feature names')
if not set(feature_names).issuperset(set(self.feature_names)):
raise ValueError(
f'feature_names {feature_names} is not a superset of '
f'feature_names of current instance {self.feature_names}')
if num_shared_parameters is None:
num_shared_parameters = self.num_shared_parameters
else:
if num_shared_parameters < self.num_shared_parameters:
raise ValueError(
f'num_shared_parameters {num_shared_parameters} is smaller than '
f'that of current instance {self.num_shared_parameters}')
if num_variables is None:
num_variables = self.num_variables
else:
if num_variables < self.num_variables:
raise ValueError(
f'num_variables {num_variables} is smaller than '
f'that of current instance {self.num_variables}')
name_mapping = {
feature_name: feature_name for feature_name in self.feature_names}
name_mapping['enhancement_factor'] = 'enhancement_factor'
for index, shared_parameter_name in enumerate(self.shared_parameter_names):
name_mapping[shared_parameter_name] = (
self._isomorphic_copy_shared_parameter_prefix + str(index))
index = 0
for variable_name in self.variable_names:
if variable_name == 'enhancement_factor':
continue
name_mapping[variable_name] = (
self._isomorphic_copy_variable_prefix + str(index))
index += 1
assert len(name_mapping) == len(self.allowed_input_names)
return EnhancementFactor(
feature_names=feature_names,
shared_parameter_names=[
self._isomorphic_copy_shared_parameter_prefix + str(index)
for index in range(num_shared_parameters)],
variable_names=[
self._isomorphic_copy_variable_prefix + str(index)
for index in range(num_variables - 1)] + ['enhancement_factor'],
instruction_list=[
instruction.__class__(
*[name_mapping[arg] for arg in instruction.args])
for instruction in self.instruction_list])
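  # Hedged illustration of the f_empty pattern mentioned in the docstring
  # above: building a blank, renamed search space with a chosen signature.
  #   skeleton = f_empty.make_isomorphic_copy(
  #       feature_names=['u', 'w'], num_shared_parameters=3, num_variables=2)
  # The copy has shared parameters c0..c2, variables v0 and
  # 'enhancement_factor', and an empty instruction list.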
def get_graph(self):
"""Gets graph representation of enhancement factor."""
return graphviz.create_graph(
feature_names=self.feature_names,
shared_parameter_names=self.shared_parameter_names,
bound_parameter_names=self.bound_parameter_names,
variable_names=self.variable_names,
instruction_list=self.to_dict()['instructions']
)
def get_symbolic_expression(self, latex=True, simplify=False):
"""Gets symbolic expression of enhancement factor.
Args:
latex: Boolean, if True, the symbolic representation will be returned
as a string in the latex format.
simplify: Boolean, whether to simplify the expression.
Returns:
Sympy expression or string, the symbolic representation of enhancement
factor.
"""
workspace = {
**{feature_name: sympy.Symbol(feature_name)
for feature_name in self.feature_names},
**{parameter_name: sympy.Symbol(parameter_name)
for parameter_name in self.parameter_names},
**{variable_name: 0. for variable_name in self.variable_names}
}
if 'x2' in workspace:
workspace['x2'] = sympy.Symbol('x') ** 2
for instruction in self.instruction_list:
instruction.sympy_apply(workspace)
enhancement_factor = workspace['enhancement_factor']
if simplify:
enhancement_factor = sympy.simplify(enhancement_factor)
if latex:
# replace utransform by u to simplify the expression
return sympy.latex(enhancement_factor).replace('utransform', 'u')
else:
return enhancement_factor
def __eq__(self, other):
return all([self.feature_names == other.feature_names,
self.shared_parameter_names == other.shared_parameter_names,
self.variable_names == other.variable_names,
self.instruction_list == other.instruction_list])
def __str__(self):
return json.dumps(self.to_dict(), indent=2)
def __repr__(self):
return self.__str__()
# empty enhancement factor
f_empty = EnhancementFactor(
feature_names=[],
shared_parameter_names=[],
variable_names=['enhancement_factor'])
# LDA enhancement factor (identity)
f_lda = EnhancementFactor(
feature_names=[],
shared_parameter_names=[],
variable_names=['enhancement_factor'],
instruction_list=[
instructions.AdditionBy1Instruction(
'enhancement_factor', 'enhancement_factor'),])
# B97 enhancement factor as a function of u
f_b97_u = EnhancementFactor(
feature_names=['u'],
shared_parameter_names=['c0', 'c1', 'c2'],
variable_names=['c1u', 'c2u2', 'enhancement_factor'],
instruction_list=[
# calculation of u
instructions.MultiplicationInstruction('c1u', 'u', 'c1'),
instructions.Power2Instruction('c2u2', 'u'),
# power series
instructions.MultiplicationInstruction('c2u2', 'c2', 'c2u2'),
instructions.AdditionInstruction('enhancement_factor', 'c0', 'c1u'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'c2u2'),
])
# B97 enhancement factor as a function of u (short version)
f_b97_u_short = EnhancementFactor(
feature_names=['u'],
shared_parameter_names=['c0', 'c1', 'c2'],
variable_names=['u2', 'enhancement_factor'],
instruction_list=[
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'c0'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c1', 'u'),
instructions.Power2Instruction('u2', 'u'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c2', 'u2'),
]
)
# B97 enhancement factor as a function of x^2
f_b97_x2 = EnhancementFactor(
feature_names=['x2'],
shared_parameter_names=['gamma', 'c0', 'c1', 'c2'],
variable_names=['gx2', 'u', 'c1u', 'c2u2', 'enhancement_factor'],
instruction_list=[
# calculation of u
instructions.MultiplicationInstruction('gx2', 'gamma', 'x2'),
instructions.AdditionBy1Instruction('u', 'gx2'),
instructions.DivisionInstruction('u', 'gx2', 'u'),
# power series
instructions.MultiplicationInstruction('c1u', 'u', 'c1'),
instructions.Power2Instruction('c2u2', 'u'),
instructions.MultiplicationInstruction('c2u2', 'c2', 'c2u2'),
instructions.AdditionInstruction('enhancement_factor', 'c0', 'c1u'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'c2u2'),
]
)
# B97 enhancement factor as a function of x^2 (short version)
f_b97_x2_short = EnhancementFactor(
feature_names=['x2'],
shared_parameter_names=['c0', 'c1', 'c2'],
variable_names=['u', 'u2', 'enhancement_factor'],
instruction_list=[
# calculation of u
instructions.UTransformInstruction('u', 'x2'),
# power series
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'c0'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c1', 'u'),
instructions.Power2Instruction('u2', 'u'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c2', 'u2'),
]
)
f_x_wb97mv = EnhancementFactor(
feature_names=['w', 'x2'],
shared_parameter_names=['gamma', 'c00', 'c10', 'c01'],
variable_names=['gx2', 'u', 'linear_term', 'enhancement_factor'],
instruction_list=[
# calculation of u
instructions.MultiplicationInstruction('gx2', 'gamma', 'x2'),
instructions.AdditionBy1Instruction('u', 'gx2'),
instructions.DivisionInstruction('u', 'gx2', 'u'),
# power series
instructions.AdditionInstruction(
'enhancement_factor', 'c00', 'enhancement_factor'),
instructions.MultiplicationInstruction(
'linear_term', 'c10', 'w'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.MultiplicationInstruction(
'linear_term', 'c01', 'u'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
]
)
f_css_wb97mv = EnhancementFactor(
feature_names=['w', 'x2'],
shared_parameter_names=['gamma', 'c00', 'c10', 'c20', 'c43', 'c04'],
variable_names=[
'gx2', 'u', 'u2', 'w2', 'linear_term', 'enhancement_factor'],
instruction_list=[
# calculation of u
instructions.MultiplicationInstruction('gx2', 'gamma', 'x2'),
instructions.AdditionBy1Instruction('u', 'gx2'),
instructions.DivisionInstruction('u', 'gx2', 'u'),
# calculation of w^2 and u^2
instructions.Power2Instruction('w2', 'w'),
instructions.Power2Instruction('u2', 'u'),
# power series
instructions.AdditionInstruction(
'enhancement_factor', 'c00', 'enhancement_factor'),
instructions.MultiplicationInstruction(
'linear_term', 'c10', 'w'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.MultiplicationInstruction(
'linear_term', 'c20', 'w2'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.Power2Instruction(
'linear_term', 'w2'),
instructions.MultiplicationInstruction(
'linear_term', 'linear_term', 'u2'),
instructions.MultiplicationInstruction(
'linear_term', 'linear_term', 'u'),
instructions.MultiplicationInstruction(
'linear_term', 'c43', 'linear_term'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.Power2Instruction(
'linear_term', 'u2'),
instructions.MultiplicationInstruction(
'linear_term', 'c04', 'linear_term'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
]
)
f_cos_wb97mv = EnhancementFactor(
feature_names=['w', 'x2'],
shared_parameter_names=['gamma', 'c00', 'c10', 'c20', 'c60', 'c61', 'c21'],
variable_names=[
'gx2', 'u', 'u2', 'w2', 'linear_term', 'enhancement_factor'],
instruction_list=[
# calculation of u
instructions.MultiplicationInstruction('gx2', 'gamma', 'x2'),
instructions.AdditionBy1Instruction('u', 'gx2'),
instructions.DivisionInstruction('u', 'gx2', 'u'),
# calculation of w^2 and u^2
instructions.Power2Instruction('w2', 'w'),
instructions.Power2Instruction('u2', 'u'),
# power series
instructions.AdditionInstruction(
'enhancement_factor', 'c00', 'enhancement_factor'),
instructions.MultiplicationInstruction(
'linear_term', 'c10', 'w'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.MultiplicationInstruction(
'linear_term', 'c20', 'w2'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.Power2Instruction(
'linear_term', 'w2'),
instructions.MultiplicationInstruction(
'linear_term', 'linear_term', 'w2'),
instructions.MultiplicationInstruction(
'linear_term', 'c60', 'linear_term'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.Power2Instruction(
'linear_term', 'w2'),
instructions.MultiplicationInstruction(
'linear_term', 'linear_term', 'w2'),
instructions.MultiplicationInstruction(
'linear_term', 'linear_term', 'u'),
instructions.MultiplicationInstruction(
'linear_term', 'c61', 'linear_term'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.MultiplicationInstruction(
'linear_term', 'w2', 'u'),
instructions.MultiplicationInstruction(
'linear_term', 'c21', 'linear_term'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
]
)
f_x_wb97mv_short = EnhancementFactor(
feature_names=['w', 'x2'],
shared_parameter_names=['c00', 'c10', 'c01'],
variable_names=['u', 'linear_term', 'enhancement_factor'],
instruction_list=[
# calculation of u
instructions.UTransformInstruction('u', 'x2'),
# power series
instructions.AdditionInstruction(
'enhancement_factor', 'c00', 'enhancement_factor'),
instructions.MultiplicationInstruction(
'linear_term', 'c10', 'w'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.MultiplicationInstruction(
'linear_term', 'c01', 'u'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
]
)
f_css_wb97mv_short = EnhancementFactor(
feature_names=['w', 'x2'],
shared_parameter_names=['c00', 'c10', 'c20', 'c43', 'c04'],
variable_names=[
'u', 'upower', 'wpower', 'linear_term', 'enhancement_factor'],
instruction_list=[
# calculation of u
instructions.UTransformInstruction('u', 'x2'),
# power series
instructions.AdditionInstruction(
'enhancement_factor', 'c00', 'enhancement_factor'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c10', 'w'),
instructions.Power2Instruction(
'wpower', 'w'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c20', 'wpower'),
instructions.Power4Instruction(
'wpower', 'w'),
instructions.Power3Instruction(
'upower', 'u'),
instructions.MultiplicationInstruction(
'linear_term', 'wpower', 'upower'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c43', 'linear_term'),
instructions.Power4Instruction(
'upower', 'u'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c04', 'upower'),
]
)
f_cos_wb97mv_short = EnhancementFactor(
feature_names=['w', 'x2'],
shared_parameter_names=['c00', 'c10', 'c20', 'c60', 'c61', 'c21'],
variable_names=[
'u', 'upower', 'wpower', 'linear_term', 'enhancement_factor'],
instruction_list=[
# calculation of u
instructions.UTransformInstruction('u', 'x2'),
# power series
instructions.AdditionInstruction(
'enhancement_factor', 'c00', 'enhancement_factor'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c10', 'w'),
instructions.Power2Instruction(
'wpower', 'w'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c20', 'wpower'),
instructions.MultiplicationInstruction(
'linear_term', 'wpower', 'u'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c21', 'linear_term'),
instructions.Power6Instruction(
'wpower', 'w'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c60', 'wpower'),
instructions.MultiplicationInstruction(
'linear_term', 'wpower', 'u'),
instructions.MultiplicationAdditionInstruction(
'enhancement_factor', 'c61', 'linear_term'),
]
)
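# Hedged end-to-end example (not part of the library proper): evaluating the
# B97-style power series defined by f_b97_u above, which computes
# c0 + c1*u + c2*u**2 on a grid. Parameter values are arbitrary.
def _example_eval_f_b97_u():
  u = onp.linspace(0., 1., 5)
  values = f_b97_u.eval(
      features={'u': u},
      parameters={'c0': 1.0, 'c1': 0.5, 'c2': 0.25},
      use_jax=False)
  # values equals 1.0 + 0.5 * u + 0.25 * u ** 2 elementwise.
  return values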
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Moves C++ files to a new location, updating any include paths that point
to them, and re-ordering headers as needed. If multiple source files are
specified, the destination must be a directory. Updates include guards in
moved header files. Assumes Chromium coding style.
Attempts to update and reorder paths used in .gyp(i) files.
Updates full-path references to files in // comments in source files.
Must run in a git checkout, as it relies on git grep for a fast way to
find files that reference the moved file.
"""
from __future__ import print_function
import optparse
import os
import re
import subprocess
import sys
import mffr
if __name__ == '__main__':
  # Need to add the directory containing sort-headers.py to the Python
  # module search path.
sys.path.append(os.path.abspath(os.path.join(sys.path[0], '..')))
sort_headers = __import__('sort-headers')
import sort_sources
HANDLED_EXTENSIONS = ['.cc', '.mm', '.h', '.hh', '.cpp']
def IsHandledFile(path):
return os.path.splitext(path)[1] in HANDLED_EXTENSIONS
def MakeDestinationPath(from_path, to_path):
"""Given the from and to paths, return a correct destination path.
  The initial destination path may be either a full path or a directory.
Also does basic sanity checks.
"""
if not IsHandledFile(from_path):
raise Exception('Only intended to move individual source files '
'(%s does not have a recognized extension).' %
from_path)
# Remove '.', '..', etc.
to_path = os.path.normpath(to_path)
if os.path.isdir(to_path):
to_path = os.path.join(to_path, os.path.basename(from_path))
else:
dest_extension = os.path.splitext(to_path)[1]
if dest_extension not in HANDLED_EXTENSIONS:
raise Exception('Destination must be either a full path with '
'a recognized extension or a directory.')
return to_path
def UpdateIncludePathForBlink(path):
"""Updates |path| as it would be when used in an include statement in Blink.
As Blink has its 'public' and 'Source' folders in the include search path,
these prefixes of file paths are not included in include statements. For
example, if |path| is 'public/foo/bar.h', the matching include statement
is '#include "foo/bar.h"'.
"""
for prefix in ('public/', 'Source/'):
if path.startswith(prefix):
return path[len(prefix):]
return path
def MoveFile(from_path, to_path):
"""Performs a git mv command to move a file from |from_path| to |to_path|.
"""
  if os.system('git mv %s %s' % (from_path, to_path)) != 0:
raise Exception('Fatal: Failed to run git mv command.')
def UpdatePostMove(from_path, to_path, in_blink):
"""Given a file that has moved from |from_path| to |to_path|,
updates the moved file's include guard to match the new path and
updates all references to the file in other source files. Also tries
to update references in .gyp(i) files using a heuristic.
"""
# Include paths always use forward slashes.
from_path = from_path.replace('\\', '/')
to_path = to_path.replace('\\', '/')
if os.path.splitext(from_path)[1] in ['.h', '.hh']:
UpdateIncludeGuard(from_path, to_path)
from_include_path = from_path
to_include_path = to_path
if in_blink:
from_include_path = UpdateIncludePathForBlink(from_include_path)
to_include_path = UpdateIncludePathForBlink(to_include_path)
# Update include/import references.
files_with_changed_includes = mffr.MultiFileFindReplace(
r'(#(include|import)\s*["<])%s([>"])' % re.escape(from_include_path),
r'\1%s\3' % to_include_path,
['*.cc', '*.h', '*.m', '*.mm', '*.cpp'])
# Reorder headers in files that changed.
for changed_file in files_with_changed_includes:
def AlwaysConfirm(a, b): return True
sort_headers.FixFileWithConfirmFunction(changed_file, AlwaysConfirm, True,
in_blink)
# Update comments; only supports // comments, which are primarily
# used in our code.
#
# This work takes a bit of time. If this script starts feeling too
# slow, one good way to speed it up is to make the comment handling
# optional under a flag.
mffr.MultiFileFindReplace(
r'(//.*)%s' % re.escape(from_path),
r'\1%s' % to_path,
['*.cc', '*.h', '*.m', '*.mm', '*.cpp'])
# Update references in GYP and BUILD.gn files.
#
# GYP files are mostly located under the first level directory (ex.
# chrome/chrome_browser.gypi), but sometimes they are located in
# directories at a deeper level (ex. extensions/shell/app_shell.gypi). On
# the other hand, BUILD.gn files can be placed in any directories.
#
# Paths in a GYP or BUILD.gn file are relative to the directory where the
# file is placed.
#
# For instance, "chrome/browser/chromeos/device_uma.h" is listed as
# "browser/chromeos/device_uma.h" in "chrome/chrome_browser_chromeos.gypi",
# but it's listed as "device_uma.h" in "chrome/browser/chromeos/BUILD.gn".
#
# To handle this, the code here will visit directories from the top level
# src directory to the directory of |from_path| and try to update GYP and
# BUILD.gn files in each directory.
#
# The code only handles files moved/renamed within the same build file. If
# files are moved beyond the same build file, the affected build files
# should be fixed manually.
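  # Worked example (illustrative paths): moving chrome/browser/foo.h to
  # chrome/common/foo.h visits '' first (matching "chrome/browser/foo.h" in
  # top-level build files), then chrome/ (matching "browser/foo.h" in
  # chrome/*.gyp* and chrome/BUILD.gn), and stops once the remaining relative
  # paths are identical.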
def SplitByFirstComponent(path):
"""'foo/bar/baz' -> ('foo', 'bar/baz')
'bar' -> ('bar', '')
'' -> ('', '')
"""
parts = re.split(r"[/\\]", path, 1)
if len(parts) == 2:
return (parts[0], parts[1])
else:
return (parts[0], '')
visiting_directory = ''
from_rest = from_path
to_rest = to_path
while True:
files_with_changed_sources = mffr.MultiFileFindReplace(
r'([\'"])%s([\'"])' % from_rest,
r'\1%s\2' % to_rest,
[os.path.join(visiting_directory, 'BUILD.gn'),
os.path.join(visiting_directory, '*.gyp*')])
for changed_file in files_with_changed_sources:
sort_sources.ProcessFile(changed_file, should_confirm=False)
from_first, from_rest = SplitByFirstComponent(from_rest)
to_first, to_rest = SplitByFirstComponent(to_rest)
visiting_directory = os.path.join(visiting_directory, from_first)
if not from_rest or not to_rest or from_rest == to_rest:
break
def MakeIncludeGuardName(path_from_root):
"""Returns an include guard name given a path from root."""
guard = path_from_root.replace('/', '_')
guard = guard.replace('\\', '_')
guard = guard.replace('.', '_')
guard += '_'
return guard.upper()
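# For example, MakeIncludeGuardName('base/foo/bar_baz.h') returns
# 'BASE_FOO_BAR_BAZ_H_'.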
def UpdateIncludeGuard(old_path, new_path):
"""Updates the include guard in a file now residing at |new_path|,
previously residing at |old_path|, with an up-to-date include guard.
Prints a warning if the update could not be completed successfully (e.g.,
because the old include guard was not formatted correctly per Chromium style).
"""
old_guard = MakeIncludeGuardName(old_path)
new_guard = MakeIncludeGuardName(new_path)
with open(new_path) as f:
contents = f.read()
new_contents = contents.replace(old_guard, new_guard)
# The file should now have three instances of the new guard: two at the top
# of the file plus one at the bottom for the comment on the #endif.
if new_contents.count(new_guard) != 3:
print('WARNING: Could not successfully update include guard; perhaps '
'old guard is not per style guide? You will have to update the '
'include guard manually. (%s)' % new_path)
with open(new_path, 'w') as f:
f.write(new_contents)
def main():
# We use "git rev-parse" to check if the script is run from a git checkout. It
# returns 0 even when run in the .git directory. We don't want people running
# this in the .git directory.
if (os.system('git rev-parse') != 0 or
os.path.basename(os.getcwd()) == '.git'):
print('Fatal: You must run in a git checkout.')
return 1
cwd = os.getcwd()
parent = os.path.dirname(cwd)
in_blink = (os.path.basename(parent) == 'third_party' and
os.path.basename(cwd) == 'WebKit')
parser = optparse.OptionParser(usage='%prog FROM_PATH... TO_PATH')
parser.add_option('--already_moved', action='store_true',
dest='already_moved',
help='Causes the script to skip moving the file.')
parser.add_option('--no_error_for_non_source_file', action='store_false',
                    default=True,
dest='error_for_non_source_file',
help='Causes the script to simply print a warning on '
'encountering a non-source file rather than raising an '
'error.')
opts, args = parser.parse_args()
if len(args) < 2:
parser.print_help()
return 1
from_paths = args[:len(args)-1]
orig_to_path = args[-1]
if len(from_paths) > 1 and not os.path.isdir(orig_to_path):
print('Target %s is not a directory.' % orig_to_path)
print()
parser.print_help()
return 1
for from_path in from_paths:
if not opts.error_for_non_source_file and not IsHandledFile(from_path):
print('%s does not appear to be a source file, skipping' % (from_path))
continue
to_path = MakeDestinationPath(from_path, orig_to_path)
if not opts.already_moved:
MoveFile(from_path, to_path)
UpdatePostMove(from_path, to_path, in_blink)
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
# -*- coding:utf-8 -*-
import gettext
import json
import os
from os import path
import unittest
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import (
LiveServerTestCase, TestCase, modify_settings, override_settings)
from django.utils import six
from django.utils._os import upath
from django.utils.module_loading import import_string
from django.utils.translation import override, LANGUAGE_SESSION_KEY
from ..urls import locale_dir
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18NTests(TestCase):
""" Tests django views in django/views/i18n.py """
def test_setlang(self):
"""
The set_language view can be used to change the session language.
The user is redirected to the 'next' argument if provided.
"""
for lang_code, lang_name in settings.LANGUAGES:
post_data = dict(language=lang_code, next='/')
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertRedirects(response, 'http://testserver/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_unsafe_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe".
"""
lang_code, lang_name = settings.LANGUAGES[0]
post_data = dict(language=lang_code, next='//unsafe/redirection/')
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertEqual(response.url, 'http://testserver/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_reversal(self):
self.assertEqual(reverse('set_language'), '/i18n/setlang/')
def test_setlang_cookie(self):
# we force saving language to a cookie rather than a session
# by excluding session middleware and those which do require it
test_settings = dict(
MIDDLEWARE_CLASSES=('django.middleware.common.CommonMiddleware',),
LANGUAGE_COOKIE_NAME='mylanguage',
LANGUAGE_COOKIE_AGE=3600 * 7 * 2,
LANGUAGE_COOKIE_DOMAIN='.example.com',
LANGUAGE_COOKIE_PATH='/test/',
)
with self.settings(**test_settings):
post_data = dict(language='pl', next='/views/')
response = self.client.post('/i18n/setlang/', data=post_data)
language_cookie = response.cookies.get('mylanguage')
self.assertEqual(language_cookie.value, 'pl')
self.assertEqual(language_cookie['domain'], '.example.com')
self.assertEqual(language_cookie['path'], '/test/')
self.assertEqual(language_cookie['max-age'], 3600 * 7 * 2)
def test_jsi18n(self):
"""The javascript_catalog can be deployed with language settings"""
for lang_code in ['es', 'fr', 'ru']:
with override(lang_code):
catalog = gettext.translation('djangojs', locale_dir, [lang_code])
if six.PY3:
trans_txt = catalog.gettext('this is to be translated')
else:
trans_txt = catalog.ugettext('this is to be translated')
response = self.client.get('/jsi18n/')
# response content must include a line like:
# "this is to be translated": <value of trans_txt Python variable>
# json.dumps() is used to be able to check unicode strings
self.assertContains(response, json.dumps(trans_txt), 1)
if lang_code == 'fr':
# Message with context (msgctxt)
self.assertContains(response, r'"month name\u0004May": "mai"', 1)
@override_settings(ROOT_URLCONF='view_tests.urls')
class JsI18NTests(TestCase):
"""
Tests django views in django/views/i18n.py that need to change
settings.LANGUAGE_CODE.
"""
def test_jsi18n_with_missing_en_files(self):
"""
The javascript_catalog shouldn't load the fallback language in the
case that the current selected language is actually the one translated
from, and hence missing translation files completely.
This happens easily when you're translating from English to other
languages and you've set settings.LANGUAGE_CODE to some other language
than English.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'esto tiene que ser traducido')
def test_jsi18n_fallback_language(self):
"""
Let's make sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('fi'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'il faut le traduire')
def testI18NLanguageNonEnglishDefault(self):
"""
Check if the Javascript i18n view returns an empty language catalog
if the default language is non-English, the selected language
        is English and there is no 'en' translation available. See #13388,
#3594 and #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
@modify_settings(INSTALLED_APPS={'append': 'view_tests.app0'})
def test_nonenglish_default_english_userpref(self):
"""
Same as above with the difference that there IS an 'en' translation
available. The Javascript i18n view must return a NON empty language catalog
with the proper English translations. See #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n_english_translation/')
self.assertContains(response, 'this app0 string is to be translated')
def testI18NLanguageNonEnglishFallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('none'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'Choisir une heure')
def test_escaping(self):
# Force a language via GET otherwise the gettext functions are a noop!
response = self.client.get('/jsi18n_admin/?language=de')
self.assertContains(response, '\\x04')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app5']})
def test_non_BMP_char(self):
"""
Non-BMP characters should not break the javascript_catalog (#21725).
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n/app5/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'emoji')
self.assertContains(response, '\\ud83d\\udca9')
@override_settings(ROOT_URLCONF='view_tests.urls')
class JsI18NTestsMultiPackage(TestCase):
"""
Tests for django views in django/views/i18n.py that need to change
settings.LANGUAGE_CODE and merge JS translation from several packages.
"""
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
def testI18NLanguageEnglishDefault(self):
"""
Check if the JavaScript i18n view returns a complete language catalog
if the default language is en-us, the selected language has a
translation available and a catalog composed by djangojs domain
translations of multiple Python packages is requested. See #13388,
#3594 and #13514 for more details.
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n_multi_packages1/')
self.assertContains(response, 'il faut traduire cette cha\\u00eene de caract\\u00e8res de app1')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app3', 'view_tests.app4']})
def testI18NDifferentNonEnLangs(self):
"""
        Similar to above, but with neither the default nor the requested
        language being English.
"""
with self.settings(LANGUAGE_CODE='fr'), override('es-ar'):
response = self.client.get('/jsi18n_multi_packages2/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def testI18NWithLocalePaths(self):
extended_locale_paths = settings.LOCALE_PATHS + (
path.join(path.dirname(
path.dirname(path.abspath(upath(__file__)))), 'app3', 'locale'),)
with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
with override('es-ar'):
response = self.client.get('/jsi18n/')
self.assertContains(response,
'este texto de app3 debe ser traducido')
skip_selenium = not os.environ.get('DJANGO_SELENIUM_TESTS', False)
@unittest.skipIf(skip_selenium, 'Selenium tests not requested')
@override_settings(ROOT_URLCONF='view_tests.urls')
class JavascriptI18nTests(LiveServerTestCase):
# The test cases use translations from these apps.
available_apps = ['django.contrib.admin', 'view_tests']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
@classmethod
def setUpClass(cls):
try:
cls.selenium = import_string(cls.webdriver_class)()
except Exception as e:
raise unittest.SkipTest('Selenium webdriver "%s" not installed or '
'not operational: %s' % (cls.webdriver_class, str(e)))
super(JavascriptI18nTests, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super(JavascriptI18nTests, cls).tearDownClass()
@override_settings(LANGUAGE_CODE='de')
def test_javascript_gettext(self):
self.selenium.get('%s%s' % (self.live_server_url, '/jsi18n_template/'))
elem = self.selenium.find_element_by_id("gettext")
self.assertEqual(elem.text, "Entfernen")
elem = self.selenium.find_element_by_id("ngettext_sing")
self.assertEqual(elem.text, "1 Element")
elem = self.selenium.find_element_by_id("ngettext_plur")
self.assertEqual(elem.text, "455 Elemente")
elem = self.selenium.find_element_by_id("pgettext")
self.assertEqual(elem.text, "Kann")
elem = self.selenium.find_element_by_id("npgettext_sing")
self.assertEqual(elem.text, "1 Resultat")
elem = self.selenium.find_element_by_id("npgettext_plur")
self.assertEqual(elem.text, "455 Resultate")
|
|
from sympy import (Add, conjugate, diff, I, Integer, latex, Mul, oo, pi, Pow,
pretty, Rational, sin, sqrt, Symbol, symbols, sympify)
from sympy.utilities.pytest import raises
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.qexpr import QExpr
from sympy.physics.quantum.state import (
Ket, Bra, TimeDepKet, TimeDepBra,
KetBase, BraBase, StateBase, Wavefunction
)
from sympy.physics.quantum.hilbert import HilbertSpace
x, y, t = symbols('x,y,t')
class TestKet(Ket):
@classmethod
def default_args(self):
return ("test",)
class TestKetMultipleLabels(Ket):
@classmethod
def default_args(self):
return ("r", "theta", "phi")
class TestTimeDepKet(TimeDepKet):
@classmethod
def default_args(self):
return ("test", "t")
class TestTimeDepKetMultipleLabels(TimeDepKet):
@classmethod
def default_args(self):
return ("r", "theta", "phi", "t")
def test_ket():
k = Ket('0')
assert isinstance(k, Ket)
assert isinstance(k, KetBase)
assert isinstance(k, StateBase)
assert isinstance(k, QExpr)
assert k.label == (Symbol('0'),)
assert k.hilbert_space == HilbertSpace()
assert k.is_commutative == False
# Make sure this doesn't get converted to the number pi.
k = Ket('pi')
assert k.label == (Symbol('pi'),)
k = Ket(x,y)
assert k.label == (x,y)
assert k.hilbert_space == HilbertSpace()
assert k.is_commutative == False
assert k.dual_class() == Bra
assert k.dual == Bra(x,y)
assert k.subs(x,y) == Ket(y,y)
k = TestKet()
assert k == TestKet("test")
k = TestKetMultipleLabels()
assert k == TestKetMultipleLabels("r", "theta", "phi")
assert Ket() == Ket('psi')
def test_bra():
b = Bra('0')
assert isinstance(b, Bra)
assert isinstance(b, BraBase)
assert isinstance(b, StateBase)
assert isinstance(b, QExpr)
assert b.label == (Symbol('0'),)
assert b.hilbert_space == HilbertSpace()
assert b.is_commutative == False
# Make sure this doesn't get converted to the number pi.
b = Bra('pi')
assert b.label == (Symbol('pi'),)
b = Bra(x,y)
assert b.label == (x,y)
assert b.hilbert_space == HilbertSpace()
assert b.is_commutative == False
assert b.dual_class() == Ket
assert b.dual == Ket(x,y)
assert b.subs(x,y) == Bra(y,y)
assert Bra() == Bra('psi')
def test_ops():
k0 = Ket(0)
k1 = Ket(1)
k = 2*I*k0 - (x/sqrt(2))*k1
assert k == Add(Mul(2, I, k0),
Mul(Rational(-1, 2), x, Pow(2, Rational(1, 2)), k1))
def test_time_dep_ket():
k = TimeDepKet(0,t)
assert isinstance(k, TimeDepKet)
assert isinstance(k, KetBase)
assert isinstance(k, StateBase)
assert isinstance(k, QExpr)
assert k.label == (Integer(0),)
assert k.args == (Integer(0),t)
assert k.time == t
assert k.dual_class() == TimeDepBra
assert k.dual == TimeDepBra(0,t)
assert k.subs(t,2) == TimeDepKet(0,2)
k = TimeDepKet(x, 0.5)
assert k.label == (x,)
assert k.args == (x,sympify(0.5))
k = TestTimeDepKet()
assert k.label == (Symbol("test"),)
assert k.time == Symbol("t")
assert k == TestTimeDepKet("test", "t")
k = TestTimeDepKetMultipleLabels()
assert k.label == (Symbol("r"), Symbol("theta"), Symbol("phi"))
assert k.time == Symbol("t")
assert k == TestTimeDepKetMultipleLabels("r", "theta", "phi", "t")
assert TimeDepKet() == TimeDepKet("psi", "t")
def test_time_dep_bra():
b = TimeDepBra(0,t)
assert isinstance(b, TimeDepBra)
assert isinstance(b, BraBase)
assert isinstance(b, StateBase)
assert isinstance(b, QExpr)
assert b.label == (Integer(0),)
assert b.args == (Integer(0),t)
assert b.time == t
assert b.dual_class() == TimeDepKet
assert b.dual == TimeDepKet(0,t)
k = TimeDepBra(x, 0.5)
assert k.label == (x,)
assert k.args == (x,sympify(0.5))
assert TimeDepBra() == TimeDepBra("psi", "t")
def test_bra_ket_dagger():
x = symbols('x',complex=True)
k = Ket('k')
b = Bra('b')
assert Dagger(k) == Bra('k')
assert Dagger(b) == Ket('b')
assert Dagger(k).is_commutative == False
k2 = Ket('k2')
e = 2*I*k + x*k2
assert Dagger(e) == conjugate(x)*Dagger(k2) - 2*I*Dagger(k)
def test_wavefunction():
x, L = symbols('x,L', real=True)
n = symbols('n', integer=True)
f = Wavefunction(x**2, x)
p = f.prob()
lims = f.limits
assert f.is_normalized == False
assert f.norm == oo
assert f(10) == 100
assert p(10) == 10000
assert lims[x] == (-oo, oo)
assert diff(f, x) == Wavefunction(2*x, x)
raises(NotImplementedError, 'f.normalize()')
assert conjugate(f) == Wavefunction(conjugate(f.expr), x)
assert conjugate(f) == Dagger(f)
g = Wavefunction(x**2*y+y**2*x, (x, 0, 1), (y, 0, 2))
lims_g = g.limits
assert lims_g[x] == (0, 1)
assert lims_g[y] == (0, 2)
assert g.is_normalized == False
assert g.norm == sqrt(42)/3
assert g(2,4) == 0
assert g(1,1) == 2
assert diff(diff(g, x), y) == Wavefunction(2*x + 2*y, (x, 0, 1), (y, 0, 2))
assert conjugate(g) == Wavefunction(conjugate(g.expr), *g.args[1:])
assert conjugate(g) == Dagger(g)
h = Wavefunction(sqrt(5)*x**2, (x, 0, 1))
assert h.is_normalized == True
assert h.normalize() == h
assert conjugate(h) == Wavefunction(conjugate(h.expr), (x, 0, 1))
assert conjugate(h) == Dagger(h)
piab = Wavefunction(sin(n*pi*x/L), (x, 0, L))
assert piab.norm == sqrt(L/2)
assert piab(L+1) == 0
assert piab(0.5) == sin(0.5*n*pi/L)
assert piab(0.5, n=1, L=1) == sin(0.5*pi)
assert piab.normalize() == \
Wavefunction(sqrt(2)/sqrt(L)*sin(n*pi*x/L), (x, 0, L))
assert conjugate(piab) == Wavefunction(conjugate(piab.expr), (x, 0, L))
assert conjugate(piab) == Dagger(piab)
k = Wavefunction(x**2, 'x')
assert type(k.variables[0]) == Symbol
|
|
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Repository module to handle different types of repositories."""
from __future__ import print_function
import constants
import logging
import os
import re
import shutil
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import rewrite_git_alternates
from chromite.lib import retry_util
# File that marks a buildroot as being used by a trybot
_TRYBOT_MARKER = '.trybot'
class SrcCheckOutException(Exception):
"""Exception gets thrown for failure to sync sources"""
def IsARepoRoot(directory):
"""Returns True if directory is the root of a repo checkout."""
return os.path.exists(os.path.join(directory, '.repo'))
def IsInternalRepoCheckout(root):
"""Returns whether root houses an internal 'repo' checkout."""
manifest_dir = os.path.join(root, '.repo', 'manifests')
manifest_url = git.RunGit(
manifest_dir, ['config', 'remote.origin.url']).output.strip()
return (os.path.splitext(os.path.basename(manifest_url))[0]
== os.path.splitext(os.path.basename(constants.MANIFEST_INT_URL))[0])
def CloneGitRepo(working_dir, repo_url, reference=None, bare=False,
mirror=False, depth=None, branch=None, single_branch=False):
"""Clone given git repo
Args:
working_dir: location where it should be cloned to
repo_url: git repo to clone
reference: If given, pathway to a git repository to access git objects
from. Note that the reference must exist as long as the newly created
repo is to be usable.
bare: Clone a bare checkout.
mirror: Clone a mirror checkout.
    depth: If given, do a shallow clone limiting the objects pulled to just
      that number of revs of history. This option is mutually exclusive with
      reference.
branch: If given, clone the given branch from the parent repository.
    single_branch: Clone only the requested branch.
"""
osutils.SafeMakedirs(working_dir)
cmd = ['clone', repo_url, working_dir]
if reference:
if depth:
raise ValueError("reference and depth are mutually exclusive "
"options; please pick one or the other.")
cmd += ['--reference', reference]
if bare:
cmd += ['--bare']
if mirror:
cmd += ['--mirror']
if depth:
cmd += ['--depth', str(int(depth))]
if branch:
cmd += ['--branch', branch]
if single_branch:
cmd += ['--single-branch']
git.RunGit(working_dir, cmd)
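# Hedged usage sketch (URL and paths are illustrative only):
#   CloneGitRepo('/tmp/src', 'https://chromium.googlesource.com/foo.git',
#                depth=1, branch='master', single_branch=True)
# performs a shallow, single-branch clone; pass reference= instead of depth=
# to borrow objects from a local mirror (the two options are mutually
# exclusive).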
def UpdateGitRepo(working_dir, repo_url, **kwargs):
"""Update the given git repo, blowing away any local changes.
If the repo does not exist, clone it from scratch.
Args:
working_dir: location where it should be cloned to
repo_url: git repo to clone
**kwargs: See CloneGitRepo.
"""
assert not kwargs.get('bare'), 'Bare checkouts are not supported'
if git.IsGitRepo(working_dir):
try:
git.CleanAndCheckoutUpstream(working_dir)
except cros_build_lib.RunCommandError:
cros_build_lib.Warning('Could not update %s', working_dir, exc_info=True)
shutil.rmtree(working_dir)
CloneGitRepo(working_dir, repo_url, **kwargs)
else:
CloneGitRepo(working_dir, repo_url, **kwargs)
def GetTrybotMarkerPath(buildroot):
"""Get path to trybot marker file given the buildroot."""
return os.path.join(buildroot, _TRYBOT_MARKER)
def CreateTrybotMarker(buildroot):
"""Create the file that identifies a buildroot as being used by a trybot."""
osutils.WriteFile(GetTrybotMarkerPath(buildroot), '')
def ClearBuildRoot(buildroot, preserve_paths=()):
"""Remove and recreate the buildroot while preserving the trybot marker."""
trybot_root = os.path.exists(GetTrybotMarkerPath(buildroot))
if os.path.exists(buildroot):
cmd = ['find', buildroot, '-mindepth', '1', '-maxdepth', '1']
ignores = []
for path in preserve_paths:
if ignores:
ignores.append('-a')
ignores += ['!', '-name', path]
cmd.extend(ignores)
cmd += ['-exec', 'rm', '-rf', '{}', '+']
cros_build_lib.SudoRunCommand(cmd)
else:
os.makedirs(buildroot)
if trybot_root:
CreateTrybotMarker(buildroot)
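# Usage sketch (not part of the original module): wipe an existing buildroot
# while keeping its .cache directory; the trybot marker, if present, is
# recreated afterwards. Assumes the caller may run `rm` via sudo, as
# ClearBuildRoot does internally.
def _ExampleClearBuildRoot(buildroot):
  """Sketch only: clear a buildroot but preserve the shared cache."""
  ClearBuildRoot(buildroot, preserve_paths=('.cache',))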
class RepoRepository(object):
"""A Class that encapsulates a repo repository.
Args:
manifest_repo_url: URL to fetch repo manifest from.
directory: local path where to checkout the repository.
branch: Branch to check out the manifest at.
referenced_repo: Repository to reference for git objects, if possible.
manifest: Which manifest.xml within the branch to use. Effectively
default.xml if not given.
depth: Mutually exclusive option to referenced_repo; this limits the
checkout to a max commit history of the given integer.
repo_url: URL to fetch repo tool from.
repo_branch: Branch to check out the repo tool at.
"""
# Use our own repo, in case android.kernel.org (the default location) is down.
_INIT_CMD = ['repo', 'init']
# If a repo hasn't been used in the last 5 runs, wipe it.
LRU_THRESHOLD = 5
def __init__(self, manifest_repo_url, directory, branch=None,
referenced_repo=None, manifest=constants.DEFAULT_MANIFEST,
depth=None, repo_url=constants.REPO_URL, repo_branch=None):
self.manifest_repo_url = manifest_repo_url
self.repo_url = repo_url
self.repo_branch = repo_branch
self.directory = directory
self.branch = branch
# It's perfectly acceptable to pass in a reference pathway that isn't
# usable. Detect it, and suppress the setting so that any depth
# settings aren't disabled due to it.
if referenced_repo is not None:
if depth is not None:
raise ValueError("referenced_repo and depth are mutually exclusive "
"options; please pick one or the other.")
if not IsARepoRoot(referenced_repo):
referenced_repo = None
self._referenced_repo = referenced_repo
self._manifest = manifest
# If the repo exists already, force a selfupdate as the first step.
self._repo_update_needed = IsARepoRoot(self.directory)
if not self._repo_update_needed and git.FindRepoDir(self.directory):
raise ValueError('Given directory %s is not the root of a repository.'
% self.directory)
self._depth = int(depth) if depth is not None else None
def _SwitchToLocalManifest(self, local_manifest):
"""Reinitializes the repository if the manifest has changed."""
logging.debug('Moving to manifest defined by %s', local_manifest)
# TODO: use upstream repo's manifest logic when we bump repo version.
manifest_path = self.GetRelativePath('.repo/manifest.xml')
os.unlink(manifest_path)
shutil.copyfile(local_manifest, manifest_path)
def Initialize(self, local_manifest=None, extra_args=()):
"""Initializes a repository. Optionally forces a local manifest.
Args:
local_manifest: The absolute path to a custom manifest to use. This will
replace .repo/manifest.xml.
extra_args: Extra args to pass to 'repo init'
"""
# Do a sanity check on the repo; if it exists and we can't pull a
# manifest from it, we know it's fairly screwed up and needs a fresh
# rebuild.
if os.path.exists(os.path.join(self.directory, '.repo', 'manifest.xml')):
try:
cros_build_lib.RunCommand(
['repo', 'manifest'], cwd=self.directory, capture_output=True)
except cros_build_lib.RunCommandError:
cros_build_lib.Warning("Wiping %r due to `repo manifest` failure",
self.directory)
paths = [os.path.join(self.directory, '.repo', x) for x in
('manifest.xml', 'manifests.git', 'manifests', 'repo')]
cros_build_lib.SudoRunCommand(['rm', '-rf'] + paths)
self._repo_update_needed = False
# Wipe local_manifest.xml if it exists- it can interfere w/ things in
# bad ways (duplicate projects, etc); we control this repository, thus
# we can destroy it.
osutils.SafeUnlink(os.path.join(self.directory, 'local_manifest.xml'))
# Force a repo self update first; during reinit, repo doesn't do the
# update itself, but we could be doing the init on a repo version less
    # than v1.9.4, which didn't have proper support for doing reinit that
# involved changing the manifest branch in use; thus selfupdate.
# Additionally, if the self update fails for *any* reason, wipe the repo
# innards and force repo init to redownload it; same end result, just
# less efficient.
# Additionally, note that this method may be called multiple times;
# thus code appropriately.
if self._repo_update_needed:
try:
cros_build_lib.RunCommand(['repo', 'selfupdate'], cwd=self.directory)
except cros_build_lib.RunCommandError:
osutils.RmDir(os.path.join(self.directory, '.repo', 'repo'),
ignore_missing=True)
self._repo_update_needed = False
init_cmd = self._INIT_CMD + ['--repo-url', self.repo_url,
'--manifest-url', self.manifest_repo_url]
if self._referenced_repo:
init_cmd.extend(['--reference', self._referenced_repo])
if self._manifest:
init_cmd.extend(['--manifest-name', self._manifest])
if self._depth is not None:
init_cmd.extend(['--depth', str(self._depth)])
init_cmd.extend(extra_args)
# Handle branch / manifest options.
if self.branch:
init_cmd.extend(['--manifest-branch', self.branch])
if self.repo_branch:
init_cmd.extend(['--repo-branch', self.repo_branch])
cros_build_lib.RunCommand(init_cmd, cwd=self.directory, input='\n\ny\n')
if local_manifest and local_manifest != self._manifest:
self._SwitchToLocalManifest(local_manifest)
@property
def _ManifestConfig(self):
return os.path.join(self.directory, '.repo', 'manifests.git', 'config')
def _EnsureMirroring(self, post_sync=False):
"""Ensure git is usable from w/in the chroot if --references is enabled
repo init --references hardcodes the abspath to parent; this pathway
however isn't usable from the chroot (it doesn't exist). As such the
pathway is rewritten to use relative pathways pointing at the root of
the repo, which via I84988630 enter_chroot sets up a helper bind mount
allowing git/repo to access the actual referenced repo.
This has to be invoked prior to a repo sync of the target trybot to
fix any pathways that may have been broken by the parent repo moving
on disk, and needs to be invoked after the sync has completed to rewrite
any new project's abspath to relative.
"""
if not self._referenced_repo:
return
proj_root = os.path.join(self.directory, '.repo', 'project-objects')
if not os.path.exists(proj_root):
# Not yet synced, nothing to be done.
return
rewrite_git_alternates.RebuildRepoCheckout(self.directory,
self._referenced_repo)
if post_sync:
chroot_path = os.path.join(self._referenced_repo, '.repo', 'chroot',
'external')
chroot_path = git.ReinterpretPathForChroot(chroot_path)
rewrite_git_alternates.RebuildRepoCheckout(
self.directory, self._referenced_repo, chroot_path)
# Finally, force the git config marker that enter_chroot looks for
# to know when to do bind mounting trickery; this normally will exist,
# but if we're converting a pre-existing repo checkout, it's possible
# that it was invoked w/out the reference arg. Note this must be
# an absolute path to the source repo- enter_chroot uses that to know
# what to bind mount into the chroot.
cmd = ['config', '--file', self._ManifestConfig, 'repo.reference',
self._referenced_repo]
git.RunGit('.', cmd)
def Detach(self):
"""Detach projects back to manifest versions. Effectively a 'reset'."""
cros_build_lib.RunCommand(['repo', '--time', 'sync', '-d'],
cwd=self.directory)
def Sync(self, local_manifest=None, jobs=None, all_branches=True,
network_only=False):
"""Sync/update the source. Changes manifest if specified.
Args:
      local_manifest: If given, check out the source using this manifest.
        DEFAULT_MANIFEST may be used to set it back to the default manifest.
jobs: May be set to override the default sync parallelism defined by
the manifest.
all_branches: If False, a repo sync -c is performed; this saves on
sync'ing via grabbing only what is needed for the manifest specified
branch. Defaults to True. TODO(davidjames): Set the default back to
False once we've fixed http://crbug.com/368722 .
network_only: If true, perform only the network half of the sync; skip
the checkout. Primarily of use to validate a manifest (although
if the manifest has bad copyfile statements, via skipping checkout
the broken copyfile tag won't be spotted), or of use when the
invoking code is fine w/ operating on bare repos, ie .repo/projects/*.
"""
try:
# Always re-initialize to the current branch.
self.Initialize(local_manifest)
# Fix existing broken mirroring configurations.
self._EnsureMirroring()
cmd = ['repo', '--time', 'sync']
if jobs:
cmd += ['--jobs', str(jobs)]
if not all_branches:
cmd.append('-c')
# Do the network half of the sync; retry as necessary to get the content.
retry_util.RunCommandWithRetries(constants.SYNC_RETRIES, cmd + ['-n'],
cwd=self.directory)
if network_only:
return
      # Do the local sync; note that there are a couple of corner cases where
# the new manifest cannot transition from the old checkout cleanly-
# primarily involving git submodules. Thus we intercept, and do
# a forced wipe, then a retry.
try:
cros_build_lib.RunCommand(cmd + ['-l'], cwd=self.directory)
except cros_build_lib.RunCommandError:
manifest = git.ManifestCheckout.Cached(self.directory)
targets = set(project['path'].split('/', 1)[0]
for project in manifest.ListCheckouts())
if not targets:
# No directories to wipe, thus nothing we can fix.
raise
cros_build_lib.SudoRunCommand(['rm', '-rf'] + sorted(targets),
cwd=self.directory)
# Retry the sync now; if it fails, let the exception propagate.
cros_build_lib.RunCommand(cmd + ['-l'], cwd=self.directory)
# We do a second run to fix any new repositories created by repo to
# use relative object pathways. Note that cros_sdk also triggers the
      # same cleanup; we kick it here anyway, erring on the side of caution.
self._EnsureMirroring(True)
self._DoCleanup()
except cros_build_lib.RunCommandError as e:
err_msg = e.Stringify(error=False, output=False)
logging.error(err_msg)
raise SrcCheckOutException(err_msg)
def _DoCleanup(self):
"""Wipe unused repositories."""
# Find all projects, even if they're not in the manifest. Note the find
    # trickery; this is done to keep it as fast as possible.
repo_path = os.path.join(self.directory, '.repo', 'projects')
current = set(cros_build_lib.RunCommand(
['find', repo_path, '-type', 'd', '-name', '*.git', '-printf', '%P\n',
'-a', '!', '-wholename', '*.git/*', '-prune'],
print_cmd=False, capture_output=True).output.splitlines())
data = {}.fromkeys(current, 0)
path = os.path.join(self.directory, '.repo', 'project.lru')
if os.path.exists(path):
existing = [x.strip().split(None, 1)
for x in osutils.ReadFile(path).splitlines()]
data.update((k, int(v)) for k, v in existing if k in current)
# Increment it all...
data.update((k, v + 1) for k, v in data.iteritems())
# Zero out what is now used.
checkouts = git.ManifestCheckout.Cached(self.directory).ListCheckouts()
data.update(('%s.git' % x['path'], 0) for x in checkouts)
# Finally... wipe anything that's greater than our threshold.
wipes = [k for k, v in data.iteritems() if v > self.LRU_THRESHOLD]
if wipes:
cros_build_lib.SudoRunCommand(
['rm', '-rf'] + [os.path.join(repo_path, proj) for proj in wipes])
map(data.pop, wipes)
osutils.WriteFile(path, "\n".join('%s %i' % x for x in data.iteritems()))
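  # The helper below is a usage sketch (not part of the original class): it
  # illustrates the bookkeeping _DoCleanup maintains in .repo/project.lru,
  # namely one "<project>.git <runs-since-last-use>" pair per line.
  def _ExampleReadProjectLru(self):
    """Sketch only: parse the LRU counters that _DoCleanup writes."""
    path = os.path.join(self.directory, '.repo', 'project.lru')
    entries = {}
    if os.path.exists(path):
      for line in osutils.ReadFile(path).splitlines():
        name, count = line.strip().split(None, 1)
        entries[name] = int(count)
    return entries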
def GetRelativePath(self, path):
"""Returns full path including source directory of path in repo."""
return os.path.join(self.directory, path)
def ExportManifest(self, mark_revision=False, revisions=True):
"""Export the revision locked manifest
Args:
      mark_revision: If True, then the sha1 of manifests.git is recorded
into the resultant manifest tag as a version attribute.
Specifically, if manifests.git is at 1234, <manifest> becomes
<manifest revision="1234">.
revisions: If True, then rewrite all branches/tags into a specific
sha1 revision. If False, don't.
Returns:
The manifest as a string.
"""
cmd = ['repo', 'manifest', '-o', '-']
if revisions:
cmd += ['-r']
output = cros_build_lib.RunCommand(
cmd, cwd=self.directory, print_cmd=False, capture_output=True,
extra_env={'PAGER':'cat'}).output
if not mark_revision:
return output
modified = git.RunGit(os.path.join(self.directory, '.repo/manifests'),
['rev-list', '-n1', 'HEAD'])
assert modified.output
return output.replace("<manifest>", '<manifest revision="%s">' %
modified.output.strip())
def IsManifestDifferent(self, other_manifest):
"""Checks whether this manifest is different than another.
    May blacklist certain repos as part of the diff.
Args:
other_manifest: Second manifest file to compare against.
Returns:
True: If the manifests are different
False: If the manifests are same
"""
logging.debug('Calling IsManifestDifferent against %s', other_manifest)
black_list = ['="chromium/']
blacklist_pattern = re.compile(r'|'.join(black_list))
manifest_revision_pattern = re.compile(r'<manifest revision="[a-f0-9]+">',
re.I)
current = self.ExportManifest()
with open(other_manifest, 'r') as manifest2_fh:
for (line1, line2) in zip(current.splitlines(), manifest2_fh):
line1 = line1.strip()
line2 = line2.strip()
if blacklist_pattern.search(line1):
logging.debug('%s ignored %s', line1, line2)
continue
if line1 != line2:
logging.debug('Current and other manifest differ.')
logging.debug('current: "%s"', line1)
logging.debug('other : "%s"', line2)
# Ignore revision differences on the manifest line. The revision of
# the manifest.git repo is uninteresting when determining if the
# current manifest describes the same sources as the other manifest.
if manifest_revision_pattern.search(line2):
logging.debug('Ignoring difference in manifest revision.')
continue
return True
return False
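# Usage sketch (not part of the original module): initialize a checkout, sync
# it, and export a revision-locked manifest. The default manifest URL is only
# an illustrative example; real callers pass their own manifest repository.
def _ExampleSync(buildroot,
                 manifest_url='https://chromium.googlesource.com/chromiumos/manifest'):
  """Sketch only: full init + sync of a repo checkout under buildroot."""
  repo = RepoRepository(manifest_url, buildroot)
  repo.Sync(jobs=8)
  return repo.ExportManifest(mark_revision=True)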
|
|
import requests
import urllib3
import zlib
from vendor import readability
from simplejson.decoder import JSONDecodeError
from requests.packages.urllib3.exceptions import LocationParseError
from socket import error as SocketError
from mongoengine.queryset import NotUniqueError
from lxml.etree import ParserError
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError
from OpenSSL.SSL import Error as OpenSSLError
from pyasn1.error import PyAsn1Error
from django.utils.encoding import smart_str
from django.conf import settings
from django.utils.encoding import smart_bytes
from django.contrib.sites.models import Site
from bs4 import BeautifulSoup
from urllib.parse import urljoin
BROKEN_URLS = [
"gamespot.com",
'thedailyskip.com',
]
class TextImporter:
def __init__(self, story=None, feed=None, story_url=None, request=None, debug=False):
self.story = story
self.story_url = story_url
if self.story and not self.story_url:
self.story_url = self.story.story_permalink
self.feed = feed
self.request = request
self.debug = debug
@property
def headers(self):
num_subscribers = getattr(self.feed, 'num_subscribers', 0)
return {
'User-Agent': 'NewsBlur Content Fetcher - %s subscriber%s - %s %s' % (
num_subscribers,
's' if num_subscribers != 1 else '',
getattr(self.feed, 'permalink', ''),
getattr(self.feed, 'fake_user_agent', ''),
),
}
def fetch(self, skip_save=False, return_document=False, use_mercury=True):
if self.story_url and any(broken_url in self.story_url for broken_url in BROKEN_URLS):
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: banned")
return
if use_mercury:
results = self.fetch_mercury(skip_save=skip_save, return_document=return_document)
if not use_mercury or not results:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY with Mercury, trying readability...", warn_color=False)
results = self.fetch_manually(skip_save=skip_save, return_document=return_document)
return results
def fetch_mercury(self, skip_save=False, return_document=False):
try:
resp = self.fetch_request(use_mercury=True)
except TimeoutError:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: timed out")
resp = None
except requests.exceptions.TooManyRedirects:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: too many redirects")
resp = None
if not resp:
return
try:
doc = resp.json()
except JSONDecodeError:
doc = None
if not doc or doc.get('error', False):
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: %s" % (doc and doc.get('messages', None) or "[unknown mercury error]"))
return
text = doc['content']
title = doc['title']
url = doc['url']
image = doc['lead_image_url']
if image and ('http://' in image[1:] or 'https://' in image[1:]):
logging.user(self.request, "~SN~FRRemoving broken image from text: %s" % image)
image = None
return self.process_content(text, title, url, image, skip_save=skip_save, return_document=return_document)
def fetch_manually(self, skip_save=False, return_document=False):
try:
resp = self.fetch_request(use_mercury=False)
except TimeoutError:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: timed out")
resp = None
except requests.exceptions.TooManyRedirects:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: too many redirects")
resp = None
if not resp:
return
@timelimit(5)
def extract_text(resp):
try:
text = resp.text
except (LookupError, TypeError):
text = resp.content
return text
try:
text = extract_text(resp)
except TimeoutError:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: timed out on resp.text")
return
# if self.debug:
# logging.user(self.request, "~FBOriginal text's website: %s" % text)
# if resp.encoding and resp.encoding != 'utf-8':
# try:
# text = text.encode(resp.encoding)
# except (LookupError, UnicodeEncodeError):
# pass
if text:
text = text.replace("\xc2\xa0", " ") # Non-breaking space, is mangled when encoding is not utf-8
text = text.replace("\\u00a0", " ") # Non-breaking space, is mangled when encoding is not utf-8
original_text_doc = readability.Document(text, url=resp.url,
positive_keywords="post, entry, postProp, article, postContent, postField")
try:
content = original_text_doc.summary(html_partial=True)
except (ParserError) as e:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: %s" % e)
return
try:
title = original_text_doc.title()
except TypeError:
title = ""
url = resp.url
return self.process_content(content, title, url, image=None, skip_save=skip_save, return_document=return_document,
original_text_doc=original_text_doc)
def process_content(self, content, title, url, image, skip_save=False, return_document=False, original_text_doc=None):
original_story_content = self.story and self.story.story_content_z and zlib.decompress(self.story.story_content_z)
if not original_story_content:
original_story_content = ""
story_image_urls = self.story and self.story.image_urls
if not story_image_urls:
story_image_urls = []
content = self.add_hero_image(content, story_image_urls)
if content:
content = self.rewrite_content(content)
full_content_is_longer = False
if self.feed and self.feed.is_newsletter:
full_content_is_longer = True
elif len(content) > len(original_story_content):
full_content_is_longer = True
if content and full_content_is_longer:
if self.story and not skip_save:
self.story.original_text_z = zlib.compress(smart_bytes(content))
try:
self.story.save()
except NotUniqueError as e:
logging.user(self.request, ("~SN~FYFetched ~FGoriginal text~FY: %s" % (e)), warn_color=False)
pass
logging.user(self.request, ("~SN~FYFetched ~FGoriginal text~FY: now ~SB%s bytes~SN vs. was ~SB%s bytes" % (
len(content),
len(original_story_content)
)), warn_color=False)
else:
logging.user(self.request, ("~SN~FRFailed~FY to fetch ~FGoriginal text~FY: was ~SB%s bytes" % (
len(original_story_content)
)), warn_color=False)
return
if return_document:
return dict(content=content, title=title, url=url, doc=original_text_doc, image=image)
return content
def add_hero_image(self, content, image_urls):
        # Only add a hero image when the original story has image URLs that the extracted text may be missing
if not len(image_urls):
return content
content_soup = BeautifulSoup(content, features="lxml")
content_imgs = content_soup.findAll('img')
for img in content_imgs:
# Since NewsBlur proxies all http images over https, the url can change, so acknowledge urls
# that are https on the original text but http on the feed
if not img.get('src'): continue
if img.get('src') in image_urls:
image_urls.remove(img.get('src'))
elif img.get('src').replace('https:', 'http:') in image_urls:
image_urls.remove(img.get('src').replace('https:', 'http:'))
if len(image_urls):
image_content = f'<img src="{image_urls[0]}">'
content = f"{image_content}\n {content}"
return content
def rewrite_content(self, content):
soup = BeautifulSoup(content, features="lxml")
for noscript in soup.findAll('noscript'):
if len(noscript.contents) > 0:
noscript.replaceWith(noscript.contents[0])
content = str(soup)
        images = set([img['src'] for img in soup.findAll('img') if img.get('src')])
for image_url in images:
abs_image_url = urljoin(self.story.story_permalink, image_url)
content = content.replace(image_url, abs_image_url)
return content
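    # Minimal sketch (not part of the original class) of the absolutization
    # rewrite_content relies on: relative image srcs are resolved against the
    # story permalink with urljoin. The URLs below are made-up examples.
    def _example_absolutize(self, permalink='https://example.com/posts/42/', src='img/hero.png'):
        return urljoin(permalink, src)  # -> 'https://example.com/posts/42/img/hero.png'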
@timelimit(10)
def fetch_request(self, use_mercury=True):
headers = self.headers
url = self.story_url
if use_mercury:
mercury_api_key = getattr(settings, 'MERCURY_PARSER_API_KEY', 'abc123')
headers["content-type"] = "application/json"
headers["x-api-key"] = mercury_api_key
domain = Site.objects.get_current().domain
if settings.DOCKERBUILD:
domain = 'haproxy'
url = f"https://{domain}/rss_feeds/original_text_fetcher?url={url}"
try:
r = requests.get(url, headers=headers, timeout=15)
r.connection.close()
except (AttributeError, SocketError, requests.ConnectionError,
requests.models.MissingSchema, requests.sessions.InvalidSchema,
requests.sessions.TooManyRedirects,
requests.models.InvalidURL,
requests.models.ChunkedEncodingError,
requests.models.ContentDecodingError,
requests.adapters.ReadTimeout,
urllib3.exceptions.LocationValueError,
LocationParseError, OpenSSLError, PyAsn1Error) as e:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: %s" % e)
return
return r
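# Usage sketch (not part of the original module): fetch extracted article text
# for a bare URL without saving anything to the database. Assumes Django
# settings and network access are available; the URL is a made-up example.
def _example_fetch_text(url='https://example.com/article'):
    importer = TextImporter(story=None, feed=None, story_url=url, request=None)
    return importer.fetch(skip_save=True, return_document=True)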
|
|
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPCreated
from pyramid.response import Response
from datetime import datetime
import logging
import transaction
from twonicornweb.views import (
site_layout,
get_user,
)
from twonicornweb.models import (
DBSession,
Application,
Deploy,
ArtifactType,
ArtifactAssignment,
DeploymentTimeWindow,
)
log = logging.getLogger(__name__)
def create_application(**kwargs):
try:
ss = kwargs['ss']
    except KeyError:
ss = None
try:
utcnow = datetime.utcnow()
create = Application(application_name=kwargs['application_name'],
nodegroup=kwargs['nodegroup'],
updated_by=kwargs['updated_by'],
created=utcnow,
updated=utcnow)
DBSession.add(create)
DBSession.flush()
application_id = create.application_id
# Create a time window assignment
create = DeploymentTimeWindow(application_id=application_id,
day_start=kwargs['day_start'],
day_end=kwargs['day_end'],
hour_start=kwargs['hour_start'],
minute_start=kwargs['minute_start'],
hour_end=kwargs['hour_end'],
minute_end=kwargs['minute_end'],
updated_by=kwargs['updated_by'],
created=utcnow,
updated=utcnow)
DBSession.add(create)
DBSession.flush()
for i in range(len(kwargs['deploy_paths'])):
artifact_type_id = ArtifactType.get_artifact_type_id(kwargs['artifact_types'][i])
create = Deploy(application_id=application_id,
artifact_type_id=artifact_type_id.artifact_type_id,
deploy_path=kwargs['deploy_paths'][i],
package_name=kwargs['package_names'][i],
updated_by=kwargs['updated_by'],
created=utcnow,
updated=utcnow)
DBSession.add(create)
DBSession.flush()
# Have to force commit transaction for self service.
# For some reason returning isn't committing.
transaction.commit()
if ss:
return_url = '/api/application?id=%s' % (application_id)
return HTTPCreated(location=return_url)
else:
return_url = '/deploys?application_id=%s&nodegroup=%s' % (application_id, kwargs['nodegroup'])
return HTTPFound(return_url)
    except Exception as e:
error_msg = ("Failed to create application: %s" % (e))
log.error(error_msg)
raise Exception(error_msg)
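# Usage sketch (not part of the original module): the kwargs create_application()
# expects, mirroring what view_cp_application() collects from the POST. All
# literal values below are made-up examples.
def _example_create_application():
    return create_application(application_name='example-app',
                              nodegroup='example_nodegroup',
                              artifact_types=['jar'],
                              deploy_paths=['/opt/example'],
                              package_names=['example-app'],
                              day_start='1',
                              day_end='5',
                              hour_start='9',
                              minute_start='0',
                              hour_end='17',
                              minute_end='0',
                              updated_by='example_user')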
def edit_application(**kwargs):
# Update the app
log.info('UPDATE APP: application_id=%s,application_name=%s,nodegroup=%s,updated_by=%s'
% (kwargs['application_id'],
kwargs['application_name'],
kwargs['nodegroup'],
kwargs['updated_by']))
log.info('UPDATE TIME WINDOW: application_id=%s,day_start=%s,day_end=%s,hour_start=%s,minute_start=%s,hour_end=%s,minute_end=%s,updated_by=%s'
% (kwargs['application_id'],
kwargs['day_start'],
kwargs['day_end'],
kwargs['hour_start'],
kwargs['minute_start'],
kwargs['hour_end'],
kwargs['minute_end'],
kwargs['updated_by']))
app = DBSession.query(Application).filter(Application.application_id==kwargs['application_id']).one()
app.application_name = kwargs['application_name']
app.nodegroup = kwargs['nodegroup']
app.time_valid.day_start = kwargs['day_start']
app.time_valid.day_end = kwargs['day_end']
app.time_valid.hour_start = kwargs['hour_start']
app.time_valid.minute_start = kwargs['minute_start']
app.time_valid.hour_end = kwargs['hour_end']
app.time_valid.minute_end = kwargs['minute_end']
app.updated_by=kwargs['updated_by']
DBSession.flush()
# Add/Update deploys
for i in range(len(kwargs['deploy_paths'])):
deploy_id = None
try:
deploy_id = kwargs['deploy_ids'][i]
        except (IndexError, KeyError):
pass
if deploy_id:
log.info('UPDATE: deploy=%s,deploy_id=%s,artifact_type=%s,deploy_path=%s,package_name=%s'
% (i,
deploy_id,
kwargs['artifact_types'][i],
kwargs['deploy_paths'][i],
kwargs['package_names'][i]))
dep = DBSession.query(Deploy).filter(Deploy.deploy_id==deploy_id).one()
artifact_type_id = ArtifactType.get_artifact_type_id(kwargs['artifact_types'][i])
dep.artifact_type_id = artifact_type_id.artifact_type_id
dep.deploy_path = kwargs['deploy_paths'][i]
dep.package_name = kwargs['package_names'][i]
dep.updated_by=kwargs['updated_by']
DBSession.flush()
else:
log.info('CREATE: deploy=%s,deploy_id=%s,artifact_type=%s,deploy_path=%s,package_name=%s'
% (i,
deploy_id,
kwargs['artifact_types'][i],
kwargs['deploy_paths'][i],
kwargs['package_names'][i]))
utcnow = datetime.utcnow()
artifact_type_id = ArtifactType.get_artifact_type_id(kwargs['artifact_types'][i])
create = Deploy(application_id=kwargs['application_id'], artifact_type_id=artifact_type_id.artifact_type_id, deploy_path=kwargs['deploy_paths'][i], package_name=kwargs['package_names'][i], updated_by=kwargs['updated_by'], created=utcnow, updated=utcnow)
DBSession.add(create)
DBSession.flush()
# FIXME: This is broken due to the relationship with artifact assignments. Needs discussion on possible solutions.
# Delete deploys
for d in kwargs['deploy_ids_delete']:
try:
            log.info('DELETE Artifact assignments for deploy: application_id=%s,deploy_id=%s'
% (kwargs['application_id'], d))
aa = DBSession.query(ArtifactAssignment)
aa = aa.filter(ArtifactAssignment.deploy_id==d)
aa = aa.all()
for a in aa:
DBSession.delete(a)
log.info('DELETE Deploy: application_id=%s,deploy_id=%s'
% (kwargs['application_id'], d))
q = DBSession.query(Deploy)
q = q.filter(Deploy.deploy_id==d)
q = q.one()
DBSession.delete(q)
DBSession.flush()
except Exception as e:
log.error('Error DELETE Deploy: application_id=%s,deploy_id=%s,exception=%s'
% (kwargs['application_id'], d, str(e)))
return_url = '/deploys?application_id=%s' % (kwargs['application_id'])
return HTTPFound(return_url)
@view_config(route_name='cp_application', permission='cp', renderer='twonicornweb:templates/cp_application.pt')
def view_cp_application(request):
page_title = 'Control Panel - Application'
user = get_user(request)
params = {'mode': None,
'commit': None,
'application_id': None,
}
for p in params:
try:
params[p] = request.params[p]
        except KeyError:
pass
mode = params['mode']
commit = params['commit']
application_id = params['application_id']
app = None
nodegroup = None
deploy_id = None
error_msg = None
artifact_types = None
try:
q = DBSession.query(ArtifactType)
artifact_types = q.all()
    except Exception as e:
        log.error("Failed to retrieve data on api call (%s)" % (e))
        return Response('Failed to retrieve artifact types', content_type='text/plain', status_int=500)
if mode == 'add':
subtitle = 'Add an application'
if commit:
ca = {'application_name': request.POST['application_name'],
'nodegroup': request.POST['nodegroup'],
'artifact_types': request.POST.getall('artifact_type'),
'deploy_paths': request.POST.getall('deploy_path'),
'package_names': request.POST.getall('package_name'),
'day_start': request.POST['day_start'],
'day_end': request.POST['day_end'],
'hour_start': request.POST['hour_start'],
'minute_start': request.POST['minute_start'],
'hour_end': request.POST['hour_end'],
'minute_end': request.POST['minute_end'],
'updated_by': user['login']
}
# FIXME not trapping because length is the same. - Safari only bug
if len(ca['deploy_paths']) != len(ca['artifact_types']):
error_msg = "You must select an artifact type and specify a deploy path."
else:
return create_application(**ca)
if mode == 'edit':
subtitle = 'Edit an application'
if not commit:
try:
q = DBSession.query(Application)
q = q.filter(Application.application_id == application_id)
app = q.one()
            except Exception as e:
conn_err_msg = e
return Response(str(conn_err_msg), content_type='text/plain', status_int=500)
if commit:
subtitle = 'Edit an application'
if 'form.submitted' in request.POST:
ea = {'application_id': request.POST['application_id'],
'application_name': request.POST['application_name'],
'nodegroup': request.POST['nodegroup'],
'artifact_types': request.POST.getall('artifact_type'),
'deploy_ids': request.POST.getall('deploy_id'),
'deploy_ids_delete': request.POST.getall('deploy_id_delete'),
'deploy_paths': request.POST.getall('deploy_path'),
'package_names': request.POST.getall('package_name'),
'day_start': request.POST['day_start'],
'day_end': request.POST['day_end'],
'hour_start': request.POST['hour_start'],
'minute_start': request.POST['minute_start'],
'hour_end': request.POST['hour_end'],
'minute_end': request.POST['minute_end'],
'updated_by': user['login']
}
if len(ea['deploy_paths']) != len(ea['artifact_types']):
error_msg = "You must select an artifact type and specify a deploy path."
else:
# Update the app
return edit_application(**ea)
return {'layout': site_layout(),
'page_title': page_title,
'user': user,
'subtitle': subtitle,
'app': app,
'application_id': application_id,
'nodegroup': nodegroup,
'deploy_id': deploy_id,
'artifact_types': artifact_types,
'mode': mode,
'commit': commit,
'error_msg': error_msg,
}
|
|
"""Administration form for file attachment storage settings."""
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from djblets.siteconfig.forms import SiteSettingsForm
from reviewboard.admin.checks import (get_can_use_amazon_s3,
get_can_use_openstack_swift,
get_can_use_couchdb)
from reviewboard.admin.siteconfig import load_site_config
class S3StorageSettingsForm(SiteSettingsForm):
"""Settings subform for S3-based file storage."""
aws_access_key_id = forms.CharField(
label=_('Amazon AWS access key'),
help_text=_('Your Amazon AWS access key ID. This can be found in '
'the "Security Credentials" section of the AWS site.'),
required=True,
widget=forms.TextInput(attrs={'size': '40'}))
aws_secret_access_key = forms.CharField(
label=_('Amazon AWS secret access key'),
help_text=_('Your Amazon AWS secret access ID. This can be found in '
'the "Security Credentials" section of the AWS site.'),
required=True,
widget=forms.TextInput(attrs={'size': '40'}))
aws_s3_bucket_name = forms.CharField(
label=_('S3 bucket name'),
help_text=_('Bucket name inside Amazon S3.'),
required=True,
widget=forms.TextInput(attrs={'size': '40'}))
aws_calling_format = forms.ChoiceField(
label=_('Amazon AWS calling format'),
choices=(
(1, 'Path'),
(2, 'Subdomain'),
(3, 'Vanity'),
),
help_text=_('Calling format for AWS requests.'),
required=True)
# TODO: these items are consumed in the S3Storage backend, but I'm not
# totally sure what they mean, or how to let users set them via siteconfig
# (especially AWS_HEADERS, which is a dictionary). For now, defaults will
# suffice.
#
# 'aws_headers': 'AWS_HEADERS',
# 'aws_default_acl': 'AWS_DEFAULT_ACL',
# 'aws_querystring_active': 'AWS_QUERYSTRING_ACTIVE',
# 'aws_querystring_expire': 'AWS_QUERYSTRING_EXPIRE',
# 'aws_s3_secure_urls': 'AWS_S3_SECURE_URLS',
def __init__(self, *args, **kwargs):
"""Initialize the subform.
If Amazon S3 support isn't available, the form's fields will be
disabled.
Args:
*args (tuple):
Additional positional arguments for the parent class.
**kwargs (dict):
Additional keyword arguments for the parent class.
"""
super(S3StorageSettingsForm, self).__init__(*args, **kwargs)
can_use_amazon_s3, reason = get_can_use_amazon_s3()
if not can_use_amazon_s3:
self.disabled_fields['aws_access_key_id'] = True
self.disabled_fields['aws_secret_access_key'] = True
self.disabled_fields['aws_s3_bucket_name'] = True
self.disabled_fields['aws_calling_format'] = True
self.disabled_reasons['aws_access_key_id'] = reason
class Meta:
title = _('Amazon S3 Settings')
fieldsets = (
(None, {
'classes': ('wide', 'hidden'),
'fields': ('aws_access_key_id',
'aws_secret_access_key',
'aws_s3_bucket_name',
'aws_calling_format'),
}),
)
class CouchDBStorageSettingsForm(SiteSettingsForm):
"""Settings subform for CouchDB-based file storage.
Note that this is currently unused. It's here for legacy reasons and
future support.
"""
couchdb_default_server = forms.CharField(
label=_('Default server'),
help_text=_('For example, "http://couchdb.local:5984"'),
required=True)
# TODO: this is consumed in the CouchDBStorage backend, but I'm not sure
# how to let users set it via siteconfig, since it's a dictionary. Since I
# haven't tested the CouchDB backend at all, it'll just sit here for now.
#
# 'couchdb_storage_options': 'COUCHDB_STORAGE_OPTIONS',
def __init__(self, *args, **kwargs):
"""Initialize the subform.
If CouchDB support isn't available, the form's fields will be
disabled.
Args:
*args (tuple):
Additional positional arguments for the parent class.
**kwargs (dict):
Additional keyword arguments for the parent class.
"""
super(CouchDBStorageSettingsForm, self).__init__(*args, **kwargs)
can_use_couchdb, reason = get_can_use_couchdb()
if not can_use_couchdb:
self.disabled_fields['couchdb_default_server'] = True
self.disabled_reasons['couchdb_default_server'] = reason
class Meta:
title = _('CouchDB Settings')
fieldsets = (
(None, {
'classes': ('wide', 'hidden'),
'fields': ('couchdb_default_server',),
}),
)
class SwiftStorageSettingsForm(SiteSettingsForm):
"""Settings subform for OpenStack Swift-based file storage."""
swift_auth_url = forms.CharField(
label=_('Swift auth URL'),
help_text=_('The URL for the auth server, '
'e.g. http://127.0.0.1:5000/v2.0'),
required=True,
widget=forms.TextInput(attrs={'size': '40'}))
swift_username = forms.CharField(
label=_('Swift username'),
help_text=_('The username to use to authenticate, '
'e.g. system:root'),
required=True,
widget=forms.TextInput(attrs={'size': '40'}))
swift_key = forms.CharField(
label=_('Swift key'),
help_text=_('The key (password) to use to authenticate.'),
required=True,
widget=forms.TextInput(attrs={'size': '40'}))
swift_auth_version = forms.ChoiceField(
label=_('Swift auth version'),
choices=(
('1', _('1.0')),
('2', _('2.0')),
),
help_text=_('The version of the authentication protocol to use.'),
required=True)
swift_container_name = forms.CharField(
label=_('Swift container name'),
help_text=_('The container in which to store the files. '
'This container must be publicly readable.'),
required=True,
widget=forms.TextInput(attrs={'size': '40'}))
def __init__(self, *args, **kwargs):
"""Initialize the subform.
If OpenStack Swift support isn't available, the form's fields will be
disabled.
Args:
*args (tuple):
Additional positional arguments for the parent class.
**kwargs (dict):
Additional keyword arguments for the parent class.
"""
super(SwiftStorageSettingsForm, self).__init__(*args, **kwargs)
can_use_openstack_swift, reason = get_can_use_openstack_swift()
if not can_use_openstack_swift:
self.disabled_fields['swift_auth_url'] = True
self.disabled_fields['swift_username'] = True
self.disabled_fields['swift_key'] = True
self.disabled_fields['swift_auth_version'] = True
self.disabled_fields['swift_container_name'] = True
self.disabled_reasons['swift_auth_url'] = reason
class Meta:
title = _('OpenStack Swift Settings')
fieldsets = (
(None, {
'classes': ('wide', 'hidden'),
'fields': ('swift_auth_url',
'swift_username',
'swift_key',
'swift_auth_version',
'swift_container_name'),
}),
)
class StorageSettingsForm(SiteSettingsForm):
"""File storage backend settings for Review Board."""
storage_backend_id = forms.ChoiceField(
label=_('File storage method'),
choices=(
('filesystem', _('Host file system')),
('s3', _('Amazon S3')),
('swift', _('OpenStack Swift')),
),
help_text=_('Storage method and location for uploaded files, such as '
'screenshots and file attachments.'),
required=True,
widget=forms.Select(attrs={
'data-subform-group': 'storage-backend',
}))
def __init__(self, *args, **kwargs):
"""Initialize the storage settings form.
This will set up the list of available storage backend settings forms,
allowing the browser to show the appropriate settings form based on the
selected backend.
Args:
*args (tuple):
Additional positional arguments for the parent class.
**kwargs (dict):
Additional keyword arguments for the parent class.
"""
super(StorageSettingsForm, self).__init__(*args, **kwargs)
self.storage_backend_forms = {
's3': S3StorageSettingsForm(*args, **kwargs),
'swift': SwiftStorageSettingsForm(*args, **kwargs),
}
def is_valid(self):
"""Return whether the form is valid.
This will check the validity of the fields on this form and on the
selected storage backend's settings form.
Returns:
bool:
            ``True`` if both the main settings form and the selected storage
            backend's settings form are valid. ``False`` if either form is
            invalid.
"""
if not super(StorageSettingsForm, self).is_valid():
return False
backend_id = self.cleaned_data['storage_backend_id']
backend_form = self.storage_backend_forms.get(backend_id)
return backend_form is None or backend_form.is_valid()
def save(self):
"""Save the form.
This will write the new configuration to the database. It will then
force a site configuration reload.
"""
super(StorageSettingsForm, self).save()
backend_id = self.cleaned_data['storage_backend_id']
if backend_id in self.storage_backend_forms:
backend_form = self.storage_backend_forms[backend_id]
backend_form.save()
load_site_config()
class Meta:
title = _('File Storage Settings')
subforms = (
{
'subforms_attr': 'storage_backend_forms',
'controller_field': 'storage_backend_id',
},
)
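# Sketch (not part of the original module): an additional storage backend would
# follow the same pattern -- a SiteSettingsForm subclass registered in
# StorageSettingsForm.storage_backend_forms under its storage_backend_id
# choice. The "example" backend and its single field are hypothetical.
class ExampleStorageSettingsForm(SiteSettingsForm):
    """Hypothetical settings subform shown only to illustrate the pattern."""
    example_bucket = forms.CharField(
        label=_('Example bucket'),
        help_text=_('Bucket or container name for the hypothetical backend.'),
        required=True,
        widget=forms.TextInput(attrs={'size': '40'}))
    class Meta:
        title = _('Example Storage Settings')
        fieldsets = (
            (None, {
                'classes': ('wide', 'hidden'),
                'fields': ('example_bucket',),
            }),
        )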
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Attributes(msrest.serialization.Model):
"""The object attributes managed by the KeyVault service.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
not_before: Optional[datetime.datetime] = None,
expires: Optional[datetime.datetime] = None,
**kwargs
):
super(Attributes, self).__init__(**kwargs)
self.enabled = enabled
self.not_before = not_before
self.expires = expires
self.created = None
self.updated = None
class BackupSecretResult(msrest.serialization.Model):
"""The backup secret result, containing the backup blob.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The backup blob containing the backed up secret.
:vartype value: bytes
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'base64'},
}
def __init__(
self,
**kwargs
):
super(BackupSecretResult, self).__init__(**kwargs)
self.value = None
class SecretBundle(msrest.serialization.Model):
"""A secret consisting of a value, id and its attributes.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The secret value.
:type value: str
:param id: The secret id.
:type id: str
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_2.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:ivar kid: If this is a secret backing a KV certificate, then this field specifies the
corresponding key backing the KV certificate.
:vartype kid: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
backing a certificate, then managed will be true.
:vartype managed: bool
"""
_validation = {
'kid': {'readonly': True},
'managed': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'kid': {'key': 'kid', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
}
def __init__(
self,
*,
value: Optional[str] = None,
id: Optional[str] = None,
content_type: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(SecretBundle, self).__init__(**kwargs)
self.value = value
self.id = id
self.content_type = content_type
self.attributes = attributes
self.tags = tags
self.kid = None
self.managed = None
class DeletedSecretBundle(SecretBundle):
"""A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on when it will be purged.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The secret value.
:type value: str
:param id: The secret id.
:type id: str
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_2.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:ivar kid: If this is a secret backing a KV certificate, then this field specifies the
corresponding key backing the KV certificate.
:vartype kid: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
backing a certificate, then managed will be true.
:vartype managed: bool
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
secret.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the secret was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'kid': {'readonly': True},
'managed': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'kid': {'key': 'kid', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
*,
value: Optional[str] = None,
id: Optional[str] = None,
content_type: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
recovery_id: Optional[str] = None,
**kwargs
):
super(DeletedSecretBundle, self).__init__(value=value, id=id, content_type=content_type, attributes=attributes, tags=tags, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
class SecretItem(msrest.serialization.Model):
"""The secret item containing secret metadata.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Secret identifier.
:type id: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_2.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
a certificate, then managed will be true.
:vartype managed: bool
"""
_validation = {
'managed': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
}
def __init__(
self,
*,
id: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
content_type: Optional[str] = None,
**kwargs
):
super(SecretItem, self).__init__(**kwargs)
self.id = id
self.attributes = attributes
self.tags = tags
self.content_type = content_type
self.managed = None
class DeletedSecretItem(SecretItem):
"""The deleted secret item containing metadata about the deleted secret.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Secret identifier.
:type id: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_2.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
a certificate, then managed will be true.
:vartype managed: bool
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
secret.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the secret was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'managed': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
*,
id: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
content_type: Optional[str] = None,
recovery_id: Optional[str] = None,
**kwargs
):
super(DeletedSecretItem, self).__init__(id=id, attributes=attributes, tags=tags, content_type=content_type, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
class DeletedSecretListResult(msrest.serialization.Model):
"""The deleted secret list result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of the deleted secrets in the vault along
with a link to the next page of deleted secrets.
:vartype value: list[~azure.keyvault.v7_2.models.DeletedSecretItem]
:ivar next_link: The URL to get the next set of deleted secrets.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DeletedSecretItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeletedSecretListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Error(msrest.serialization.Model):
"""The key vault server error.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar inner_error: The key vault server error.
:vartype inner_error: ~azure.keyvault.v7_2.models.Error
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'inner_error': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'inner_error': {'key': 'innererror', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.code = None
self.message = None
self.inner_error = None
class KeyVaultError(msrest.serialization.Model):
"""The key vault error exception.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: The key vault server error.
:vartype error: ~azure.keyvault.v7_2.models.Error
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultError, self).__init__(**kwargs)
self.error = None
class SecretAttributes(Attributes):
"""The secret management attributes.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
:ivar recoverable_days: softDelete data retention days. Value should be >=7 and <=90 when
softDelete enabled, otherwise 0.
:vartype recoverable_days: int
:ivar recovery_level: Reflects the deletion recovery level currently in effect for secrets in
the current vault. If it contains 'Purgeable', the secret can be permanently deleted by a
privileged user; otherwise, only the system can purge the secret, at the end of the retention
interval. Possible values include: "Purgeable", "Recoverable+Purgeable", "Recoverable",
"Recoverable+ProtectedSubscription", "CustomizedRecoverable+Purgeable",
"CustomizedRecoverable", "CustomizedRecoverable+ProtectedSubscription".
:vartype recovery_level: str or ~azure.keyvault.v7_2.models.DeletionRecoveryLevel
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
'recoverable_days': {'readonly': True},
'recovery_level': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
'recoverable_days': {'key': 'recoverableDays', 'type': 'int'},
'recovery_level': {'key': 'recoveryLevel', 'type': 'str'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
not_before: Optional[datetime.datetime] = None,
expires: Optional[datetime.datetime] = None,
**kwargs
):
super(SecretAttributes, self).__init__(enabled=enabled, not_before=not_before, expires=expires, **kwargs)
self.recoverable_days = None
self.recovery_level = None
class SecretListResult(msrest.serialization.Model):
"""The secret list result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of secrets in the key vault along with a link
to the next page of secrets.
:vartype value: list[~azure.keyvault.v7_2.models.SecretItem]
:ivar next_link: The URL to get the next set of secrets.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SecretItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecretListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SecretProperties(msrest.serialization.Model):
"""Properties of the key backing a certificate.
:param content_type: The media type (MIME type).
:type content_type: str
"""
_attribute_map = {
'content_type': {'key': 'contentType', 'type': 'str'},
}
def __init__(
self,
*,
content_type: Optional[str] = None,
**kwargs
):
super(SecretProperties, self).__init__(**kwargs)
self.content_type = content_type
class SecretRestoreParameters(msrest.serialization.Model):
"""The secret restore parameters.
All required parameters must be populated in order to send to Azure.
:param secret_bundle_backup: Required. The backup blob associated with a secret bundle.
:type secret_bundle_backup: bytes
"""
_validation = {
'secret_bundle_backup': {'required': True},
}
_attribute_map = {
'secret_bundle_backup': {'key': 'value', 'type': 'base64'},
}
def __init__(
self,
*,
secret_bundle_backup: bytes,
**kwargs
):
super(SecretRestoreParameters, self).__init__(**kwargs)
self.secret_bundle_backup = secret_bundle_backup
class SecretSetParameters(msrest.serialization.Model):
"""The secret set parameters.
All required parameters must be populated in order to send to Azure.
:param value: Required. The value of the secret.
:type value: str
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:param secret_attributes: The secret management attributes.
:type secret_attributes: ~azure.keyvault.v7_2.models.SecretAttributes
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'secret_attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
}
def __init__(
self,
*,
value: str,
tags: Optional[Dict[str, str]] = None,
content_type: Optional[str] = None,
secret_attributes: Optional["SecretAttributes"] = None,
**kwargs
):
super(SecretSetParameters, self).__init__(**kwargs)
self.value = value
self.tags = tags
self.content_type = content_type
self.secret_attributes = secret_attributes
class SecretUpdateParameters(msrest.serialization.Model):
"""The secret update parameters.
:param content_type: Type of the secret value such as a password.
:type content_type: str
:param secret_attributes: The secret management attributes.
:type secret_attributes: ~azure.keyvault.v7_2.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_attribute_map = {
'content_type': {'key': 'contentType', 'type': 'str'},
'secret_attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
content_type: Optional[str] = None,
secret_attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(SecretUpdateParameters, self).__init__(**kwargs)
self.content_type = content_type
self.secret_attributes = secret_attributes
self.tags = tags
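# A minimal usage sketch (illustrative; not part of the generated client):
# build request payloads with the models above. serialize() is inherited
# from msrest.serialization.Model and returns a JSON-ready dict.
def _example_secret_payloads():
    set_params = SecretSetParameters(
        value='s3cr3t',  # the secret value to store
        content_type='password',
        tags={'env': 'dev'},
        secret_attributes=SecretAttributes(enabled=True),
    )
    update_params = SecretUpdateParameters(content_type='password')
    return set_params.serialize(), update_params.serialize()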
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import sleep, Timeout
from eventlet.green import httplib, socket
import json
import six
from six.moves import range
from six.moves import urllib
import struct
from sys import exc_info, exit
import zlib
from time import gmtime, strftime, time
from zlib import compressobj
from swift.common.constraints import AUTO_CREATE_ACCOUNT_PREFIX
from swift.common.exceptions import ClientException
from swift.common.http import (HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES,
is_client_error, is_server_error)
from swift.common.request_helpers import USE_REPLICATION_NETWORK_HEADER
from swift.common.swob import Request, bytes_to_wsgi
from swift.common.utils import quote, closing_if_possible
from swift.common.wsgi import loadapp, pipeline_property
if six.PY3:
from eventlet.green.urllib import request as urllib2
else:
from eventlet.green import urllib2
class UnexpectedResponse(Exception):
"""
Exception raised on invalid responses to InternalClient.make_request().
:param message: Exception message.
:param resp: The unexpected response.
"""
def __init__(self, message, resp):
super(UnexpectedResponse, self).__init__(message)
self.resp = resp
class CompressingFileReader(object):
"""
Wrapper for file object to compress object while reading.
Can be used to wrap file objects passed to InternalClient.upload_object().
Used in testing of InternalClient.
:param file_obj: File object to wrap.
:param compresslevel: Compression level, defaults to 9.
:param chunk_size: Size of chunks read when iterating using object,
defaults to 4096.
"""
def __init__(self, file_obj, compresslevel=9, chunk_size=4096):
self._f = file_obj
self.compresslevel = compresslevel
self.chunk_size = chunk_size
self.set_initial_state()
def set_initial_state(self):
"""
Sets the object to the state needed for the first read.
"""
self._f.seek(0)
self._compressor = compressobj(
self.compresslevel, zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
self.done = False
self.first = True
self.crc32 = 0
self.total_size = 0
def read(self, *a, **kw):
"""
Reads a chunk from the file object.
Params are passed directly to the underlying file object's read().
:returns: Compressed chunk from file object.
"""
if self.done:
return b''
x = self._f.read(*a, **kw)
if x:
self.crc32 = zlib.crc32(x, self.crc32) & 0xffffffff
self.total_size += len(x)
compressed = self._compressor.compress(x)
if not compressed:
compressed = self._compressor.flush(zlib.Z_SYNC_FLUSH)
else:
compressed = self._compressor.flush(zlib.Z_FINISH)
crc32 = struct.pack("<L", self.crc32 & 0xffffffff)
size = struct.pack("<L", self.total_size & 0xffffffff)
footer = crc32 + size
compressed += footer
self.done = True
if self.first:
self.first = False
header = b'\037\213\010\000\000\000\000\000\002\377'
compressed = header + compressed
return compressed
def __iter__(self):
return self
def __next__(self):
chunk = self.read(self.chunk_size)
if chunk:
return chunk
raise StopIteration
next = __next__
def seek(self, offset, whence=0):
if not (offset == 0 and whence == 0):
raise NotImplementedError('Seek implemented on offset 0 only')
self.set_initial_state()
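# A minimal usage sketch (hypothetical path): wrap an open file so its bytes
# come out gzip-compressed while being read, e.g. before handing the wrapper
# to InternalClient.upload_object() below.
def _example_compressing_reader(path='/tmp/report.log'):
    with open(path, 'rb') as fp:
        reader = CompressingFileReader(fp, compresslevel=6, chunk_size=8192)
        # iterating yields gzip-framed chunks until the input is exhausted
        return b''.join(chunk for chunk in reader)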
class InternalClient(object):
"""
An internal client that uses a swift proxy app to make requests to Swift.
This client will exponentially slow down for retries.
:param conf_path: Full path to proxy config.
:param user_agent: User agent to be sent to requests to Swift.
:param request_tries: Number of tries before InternalClient.make_request()
gives up.
"""
def __init__(self, conf_path, user_agent, request_tries,
allow_modify_pipeline=False, use_replication_network=False):
if request_tries < 1:
raise ValueError('request_tries must be positive')
self.app = loadapp(conf_path,
allow_modify_pipeline=allow_modify_pipeline)
self.user_agent = user_agent
self.request_tries = request_tries
self.use_replication_network = use_replication_network
get_object_ring = pipeline_property('get_object_ring')
container_ring = pipeline_property('container_ring')
account_ring = pipeline_property('account_ring')
auto_create_account_prefix = pipeline_property(
'auto_create_account_prefix', default=AUTO_CREATE_ACCOUNT_PREFIX)
def make_request(
self, method, path, headers, acceptable_statuses, body_file=None,
params=None):
"""Makes a request to Swift with retries.
:param method: HTTP method of request.
:param path: Path of request.
:param headers: Headers to be sent with request.
:param acceptable_statuses: List of acceptable statuses for request.
:param body_file: Body file to be passed along with request,
defaults to None.
:param params: A dict of params to be set in request query string,
defaults to None.
:returns: Response object on success.
:raises UnexpectedResponse: Exception raised when make_request() fails
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = dict(headers)
headers['user-agent'] = self.user_agent
headers.setdefault('x-backend-allow-reserved-names', 'true')
if self.use_replication_network:
headers.setdefault(USE_REPLICATION_NETWORK_HEADER, 'true')
for attempt in range(self.request_tries):
resp = exc_type = exc_value = exc_traceback = None
req = Request.blank(
path, environ={'REQUEST_METHOD': method}, headers=headers)
if body_file is not None:
if hasattr(body_file, 'seek'):
body_file.seek(0)
req.body_file = body_file
if params:
req.params = params
try:
resp = req.get_response(self.app)
except (Exception, Timeout):
exc_type, exc_value, exc_traceback = exc_info()
else:
if resp.status_int in acceptable_statuses or \
resp.status_int // 100 in acceptable_statuses:
return resp
elif not is_server_error(resp.status_int):
# No sense retrying when we expect the same result
break
# sleep only between tries, not after each one
if attempt < self.request_tries - 1:
if resp:
# always close any resp.app_iter before we discard it
with closing_if_possible(resp.app_iter):
# for non 2XX requests it's safe and useful to drain
# the response body so we log the correct status code
if resp.status_int // 100 != 2:
for iter_body in resp.app_iter:
pass
sleep(2 ** (attempt + 1))
if resp:
msg = 'Unexpected response: %s' % resp.status
if resp.status_int // 100 != 2 and resp.body:
# provide additional context (and drain the response body) for
# non 2XX responses
msg += ' (%s)' % resp.body
raise UnexpectedResponse(msg, resp)
if exc_type:
# To make pep8 tool happy, in place of raise t, v, tb:
six.reraise(exc_type, exc_value, exc_traceback)
def _get_metadata(
self, path, metadata_prefix='', acceptable_statuses=(2,),
headers=None, params=None):
"""
Gets metadata by doing a HEAD on a path and using the metadata_prefix
to get values from the headers returned.
:param path: Path to do HEAD on.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param headers: extra headers to send
:returns: A dict of metadata with metadata_prefix stripped from keys.
Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
resp = self.make_request('HEAD', path, headers, acceptable_statuses,
params=params)
metadata_prefix = metadata_prefix.lower()
metadata = {}
for k, v in resp.headers.items():
if k.lower().startswith(metadata_prefix):
metadata[k[len(metadata_prefix):].lower()] = v
return metadata
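    # Example (illustrative): a HEAD on an account path with
    # metadata_prefix='x-account-meta-' turns a header such as
    # 'X-Account-Meta-Color: blue' into {'color': 'blue'}.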
def _iter_items(
self, path, marker='', end_marker='', prefix='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of items from a json listing. Assumes listing has
'name' key defined and uses markers.
:param path: Path to do GET on.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param prefix: Prefix of items
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
if not isinstance(marker, bytes):
marker = marker.encode('utf8')
if not isinstance(end_marker, bytes):
end_marker = end_marker.encode('utf8')
if not isinstance(prefix, bytes):
prefix = prefix.encode('utf8')
while True:
resp = self.make_request(
'GET', '%s?format=json&marker=%s&end_marker=%s&prefix=%s' %
(path, bytes_to_wsgi(quote(marker)),
bytes_to_wsgi(quote(end_marker)),
bytes_to_wsgi(quote(prefix))),
{}, acceptable_statuses)
if not resp.status_int == 200:
if resp.status_int >= HTTP_MULTIPLE_CHOICES:
b''.join(resp.app_iter)
break
data = json.loads(resp.body)
if not data:
break
for item in data:
yield item
marker = data[-1]['name'].encode('utf8')
def make_path(self, account, container=None, obj=None):
"""
        Returns a swift path for a request, quoting and UTF-8 encoding the
        path parts as needed.
:param account: swift account
:param container: container, defaults to None
:param obj: object, defaults to None
:raises ValueError: Is raised if obj is specified and container is
not.
"""
path = '/v1/%s' % quote(account)
if container:
path += '/%s' % quote(container)
if obj:
path += '/%s' % quote(obj)
elif obj:
raise ValueError('Object specified without container')
return path
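    # Example (illustrative): make_path('AUTH_test', 'cont', 'obj with space')
    # returns '/v1/AUTH_test/cont/obj%20with%20space'; each path part is
    # quoted individually.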
def _set_metadata(
self, path, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets metadata on path using metadata_prefix to set values in headers of
POST request.
:param path: Path to do POST on.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = {}
for k, v in metadata.items():
if k.lower().startswith(metadata_prefix):
headers[k] = v
else:
headers['%s%s' % (metadata_prefix, k)] = v
self.make_request('POST', path, headers, acceptable_statuses)
# account methods
def iter_containers(
self, account, marker='', end_marker='', prefix='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of containers dicts from an account.
:param account: Account on which to do the container listing.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param prefix: Prefix of containers
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
return self._iter_items(path, marker, end_marker, prefix,
acceptable_statuses)
def create_account(self, account):
"""
Creates an account.
:param account: Account to create.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
self.make_request('PUT', path, {}, (201, 202))
def delete_account(self, account, acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Deletes an account.
:param account: Account to delete.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
self.make_request('DELETE', path, {}, acceptable_statuses)
def get_account_info(
self, account, acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns (container_count, object_count) for an account.
:param account: Account on which to get the information.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
resp = self.make_request('HEAD', path, {}, acceptable_statuses)
if not resp.status_int // 100 == 2:
return (0, 0)
return (int(resp.headers.get('x-account-container-count', 0)),
int(resp.headers.get('x-account-object-count', 0)))
def get_account_metadata(
self, account, metadata_prefix='', acceptable_statuses=(2,),
params=None):
"""Gets account metadata.
:param account: Account on which to get the metadata.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:returns: Returns dict of account metadata. Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
headers=None, params=params)
def set_account_metadata(
self, account, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets account metadata. A call to this will add to the account
metadata and not overwrite all of it with values in the metadata dict.
To clear an account metadata value, pass an empty string as
the value for the key in the metadata dict.
:param account: Account on which to get the metadata.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
# container methods
def container_exists(self, account, container):
"""Checks to see if a container exists.
:param account: The container's account.
:param container: Container to check.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
:returns: True if container exists, false otherwise.
"""
path = self.make_path(account, container)
resp = self.make_request('HEAD', path, {}, (2, HTTP_NOT_FOUND))
return not resp.status_int == HTTP_NOT_FOUND
def create_container(
self, account, container, headers=None, acceptable_statuses=(2,)):
"""
Creates container.
:param account: The container's account.
:param container: Container to create.
:param headers: Defaults to empty dict.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container)
self.make_request('PUT', path, headers, acceptable_statuses)
def delete_container(
self, account, container, headers=None,
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Deletes a container.
:param account: The container's account.
:param container: Container to delete.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container)
self.make_request('DELETE', path, headers, acceptable_statuses)
def get_container_metadata(
self, account, container, metadata_prefix='',
acceptable_statuses=(2,), params=None):
"""Gets container metadata.
:param account: The container's account.
:param container: Container to get metadata on.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:returns: Returns dict of container metadata. Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
params=params)
def iter_objects(
self, account, container, marker='', end_marker='', prefix='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of object dicts from a container.
:param account: The container's account.
:param container: Container to iterate objects on.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param prefix: Prefix of objects
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
return self._iter_items(path, marker, end_marker, prefix,
acceptable_statuses)
def set_container_metadata(
self, account, container, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets container metadata. A call to this will add to the container
metadata and not overwrite all of it with values in the metadata dict.
To clear a container metadata value, pass an empty string as the value
for the key in the metadata dict.
:param account: The container's account.
:param container: Container to set metadata on.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
# object methods
def delete_object(
self, account, container, obj,
acceptable_statuses=(2, HTTP_NOT_FOUND),
headers=None):
"""
Deletes an object.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:param headers: extra headers to send with request
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
resp = self.make_request('DELETE', path, (headers or {}),
acceptable_statuses)
# Drain the response body to prevent unexpected disconnect
# in proxy-server
with closing_if_possible(resp.app_iter):
for iter_body in resp.app_iter:
pass
def get_object_metadata(
self, account, container, obj, metadata_prefix='',
acceptable_statuses=(2,), headers=None, params=None):
"""Gets object metadata.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param headers: extra headers to send with request
:returns: Dict of object metadata.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
headers=headers, params=params)
def get_object(self, account, container, obj, headers=None,
acceptable_statuses=(2,), params=None):
"""
Gets an object.
:param account: The object's account.
:param container: The object's container.
:param obj: The object name.
:param headers: Headers to send with request, defaults to empty dict.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param params: A dict of params to be set in request query string,
defaults to None.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
:returns: A 3-tuple (status, headers, iterator of object body)
"""
headers = headers or {}
path = self.make_path(account, container, obj)
resp = self.make_request(
'GET', path, headers, acceptable_statuses, params=params)
return (resp.status_int, resp.headers, resp.app_iter)
def iter_object_lines(
self, account, container, obj, headers=None,
acceptable_statuses=(2,)):
"""
Returns an iterator of object lines from an uncompressed or compressed
text object.
Uncompress object as it is read if the object's name ends with '.gz'.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container, obj)
resp = self.make_request('GET', path, headers, acceptable_statuses)
if not resp.status_int // 100 == 2:
return
last_part = b''
compressed = obj.endswith('.gz')
# magic in the following zlib.decompressobj argument is courtesy of
# Python decompressing gzip chunk-by-chunk
# http://stackoverflow.com/questions/2423866
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
for chunk in resp.app_iter:
if compressed:
chunk = d.decompress(chunk)
parts = chunk.split(b'\n')
if len(parts) == 1:
last_part = last_part + parts[0]
else:
parts[0] = last_part + parts[0]
for part in parts[:-1]:
yield part
last_part = parts[-1]
if last_part:
yield last_part
def set_object_metadata(
self, account, container, obj, metadata,
metadata_prefix='', acceptable_statuses=(2,)):
"""
Sets an object's metadata. The object's metadata will be overwritten
by the values in the metadata dict.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
def upload_object(
self, fobj, account, container, obj, headers=None,
acceptable_statuses=(2,), params=None):
"""
:param fobj: File object to read object's content from.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param headers: Headers to send with request, defaults to empty dict.
:param acceptable_statuses: List of acceptable statuses for request.
:param params: A dict of params to be set in request query string,
defaults to None.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = dict(headers or {})
if 'Content-Length' not in headers:
headers['Transfer-Encoding'] = 'chunked'
path = self.make_path(account, container, obj)
self.make_request('PUT', path, headers, acceptable_statuses, fobj,
params=params)
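# A minimal usage sketch (hypothetical conf path, account and object names):
# create an InternalClient and exercise a couple of the helpers above.
def _example_internal_client(conf_path='/etc/swift/internal-client.conf'):
    client = InternalClient(conf_path, 'example-agent', request_tries=3)
    # list container names in an account
    names = [c['name'] for c in client.iter_containers('AUTH_example')]
    # fetch user metadata for an object, stripping the metadata prefix
    meta = client.get_object_metadata(
        'AUTH_example', 'logs', 'day.log.gz',
        metadata_prefix='x-object-meta-')
    return names, meta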
def get_auth(url, user, key, auth_version='1.0', **kwargs):
if auth_version != '1.0':
exit('ERROR: swiftclient missing, only auth v1.0 supported')
req = urllib2.Request(url)
req.add_header('X-Auth-User', user)
req.add_header('X-Auth-Key', key)
conn = urllib2.urlopen(req)
headers = conn.info()
return (
headers.getheader('X-Storage-Url'),
headers.getheader('X-Auth-Token'))
class SimpleClient(object):
"""
Simple client that is used in bin/swift-dispersion-* and container sync
"""
def __init__(self, url=None, token=None, starting_backoff=1,
max_backoff=5, retries=5):
self.url = url
self.token = token
        self.attempts = 0  # needed in swift-dispersion-populate
self.starting_backoff = starting_backoff
self.max_backoff = max_backoff
self.retries = retries
def base_request(self, method, container=None, name=None, prefix=None,
headers=None, proxy=None, contents=None,
full_listing=None, logger=None, additional_info=None,
timeout=None, marker=None):
# Common request method
trans_start = time()
url = self.url
if full_listing:
info, body_data = self.base_request(
method, container, name, prefix, headers, proxy,
timeout=timeout, marker=marker)
listing = body_data
while listing:
marker = listing[-1]['name']
info, listing = self.base_request(
method, container, name, prefix, headers, proxy,
timeout=timeout, marker=marker)
if listing:
body_data.extend(listing)
return [info, body_data]
if headers is None:
headers = {}
if self.token:
headers['X-Auth-Token'] = self.token
if container:
url = '%s/%s' % (url.rstrip('/'), quote(container))
if name:
url = '%s/%s' % (url.rstrip('/'), quote(name))
else:
params = ['format=json']
if prefix:
params.append('prefix=%s' % prefix)
if marker:
params.append('marker=%s' % quote(marker))
url += '?' + '&'.join(params)
req = urllib2.Request(url, headers=headers, data=contents)
if proxy:
proxy = urllib.parse.urlparse(proxy)
req.set_proxy(proxy.netloc, proxy.scheme)
req.get_method = lambda: method
conn = urllib2.urlopen(req, timeout=timeout)
body = conn.read()
info = conn.info()
try:
body_data = json.loads(body)
except ValueError:
body_data = None
trans_stop = time()
if logger:
sent_content_length = 0
for n, v in headers.items():
nl = n.lower()
if nl == 'content-length':
try:
sent_content_length = int(v)
break
except ValueError:
pass
logger.debug("-> " + " ".join(
quote(str(x) if x else "-", ":/")
for x in (
strftime('%Y-%m-%dT%H:%M:%S', gmtime(trans_stop)),
method,
url,
conn.getcode(),
sent_content_length,
info['content-length'],
trans_start,
trans_stop,
trans_stop - trans_start,
additional_info
)))
return [info, body_data]
def retry_request(self, method, **kwargs):
retries = kwargs.pop('retries', self.retries)
self.attempts = 0
backoff = self.starting_backoff
while self.attempts <= retries:
self.attempts += 1
try:
return self.base_request(method, **kwargs)
except urllib2.HTTPError as err:
if is_client_error(err.getcode() or 500):
raise ClientException('Client error',
http_status=err.getcode())
elif self.attempts > retries:
raise ClientException('Raise too many retries',
http_status=err.getcode())
except (socket.error, httplib.HTTPException, urllib2.URLError):
if self.attempts > retries:
raise
sleep(backoff)
backoff = min(backoff * 2, self.max_backoff)
def get_account(self, *args, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('GET', **kwargs)
def put_container(self, container, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('PUT', container=container, **kwargs)
def get_container(self, container, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('GET', container=container, **kwargs)
def put_object(self, container, name, contents, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('PUT', container=container, name=name,
contents=contents.read(), **kwargs)
def head_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
return client.retry_request('HEAD', **kwargs)
def put_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
client.retry_request('PUT', **kwargs)
def delete_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
client.retry_request('DELETE', **kwargs)
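# A minimal usage sketch (hypothetical URL and token): SimpleClient retries
# failed requests with exponential backoff, capped at max_backoff seconds.
def _example_simple_client(url='http://127.0.0.1:8080/v1/AUTH_test',
                           token='AUTH_tk_example'):
    client = SimpleClient(url=url, token=token, retries=3)
    # returns [info, body_data]; body_data is the parsed JSON listing or
    # None if the response body was not JSON
    return client.get_container('example-container')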
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from heatclient import exc
from oslo_log import log as logging
import six
from testtools import matchers
from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
LOG = logging.getLogger(__name__)
class AutoscalingGroupTest(functional_base.FunctionalTestsBase):
template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create multiple instances.",
"Parameters" : {"size": {"Type": "String", "Default": "1"},
"AZ": {"Type": "String", "Default": "nova"},
"image": {"Type": "String"},
"flavor": {"Type": "String"}},
"Resources": {
"JobServerGroup": {
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : [{"Ref": "AZ"}],
"LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
"MinSize" : {"Ref": "size"},
"MaxSize" : "20"
}
},
"JobServerConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Metadata": {"foo": "bar"},
"Properties": {
"ImageId" : {"Ref": "image"},
"InstanceType" : {"Ref": "flavor"},
"SecurityGroups" : [ "sg-1" ],
"UserData" : "jsconfig data"
}
}
},
"Outputs": {
"InstanceList": {"Value": {
"Fn::GetAtt": ["JobServerGroup", "InstanceList"]}},
"JobServerConfigRef": {"Value": {
"Ref": "JobServerConfig"}}
}
}
'''
instance_template = '''
heat_template_version: 2013-05-23
parameters:
ImageId: {type: string}
InstanceType: {type: string}
SecurityGroups: {type: comma_delimited_list}
UserData: {type: string}
Tags: {type: comma_delimited_list, default: "x,y"}
resources:
random1:
type: OS::Heat::RandomString
properties:
salt: {get_param: ImageId}
outputs:
PublicIp: {value: {get_attr: [random1, value]}}
AvailabilityZone: {value: 'not-used11'}
PrivateDnsName: {value: 'not-used12'}
PublicDnsName: {value: 'not-used13'}
PrivateIp: {value: 'not-used14'}
'''
# This is designed to fail.
bad_instance_template = '''
heat_template_version: 2013-05-23
parameters:
ImageId: {type: string}
InstanceType: {type: string}
SecurityGroups: {type: comma_delimited_list}
UserData: {type: string}
Tags: {type: comma_delimited_list, default: "x,y"}
resources:
random1:
type: OS::Heat::RandomString
depends_on: waiter
ready_poster:
type: AWS::CloudFormation::WaitConditionHandle
waiter:
type: AWS::CloudFormation::WaitCondition
properties:
Handle: {get_resource: ready_poster}
Timeout: 1
outputs:
PublicIp:
value: {get_attr: [random1, value]}
'''
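    # The WaitCondition above is never signalled and times out after one
    # second, so any group built from this template fails to create.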
def setUp(self):
super(AutoscalingGroupTest, self).setUp()
if not self.conf.image_ref:
raise self.skipException("No image configured to test")
if not self.conf.minimal_image_ref:
raise self.skipException("No minimal image configured to test")
if not self.conf.instance_type:
raise self.skipException("No flavor configured to test")
def assert_instance_count(self, stack, expected_count):
inst_list = self._stack_output(stack, 'InstanceList')
self.assertEqual(expected_count, len(inst_list.split(',')))
def _assert_instance_state(self, nested_identifier,
num_complete, num_failed):
for res in self.client.resources.list(nested_identifier):
if 'COMPLETE' in res.resource_status:
num_complete = num_complete - 1
elif 'FAILED' in res.resource_status:
num_failed = num_failed - 1
self.assertEqual(0, num_failed)
self.assertEqual(0, num_complete)
class AutoscalingGroupBasicTest(AutoscalingGroupTest):
def test_basic_create_works(self):
"""Make sure the working case is good.
Note this combines test_override_aws_ec2_instance into this test as
well, which is:
If AWS::EC2::Instance is overridden, AutoScalingGroup will
automatically use that overridden resource type.
"""
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 4,
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
files=files, environment=env)
initial_resources = {
'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
stack = self.client.stacks.get(stack_identifier)
self.assert_instance_count(stack, 4)
def test_size_updates_work(self):
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
files=files,
environment=env)
stack = self.client.stacks.get(stack_identifier)
self.assert_instance_count(stack, 2)
# Increase min size to 5
env2 = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 5,
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
self.update_stack(stack_identifier, self.template,
environment=env2, files=files)
stack = self.client.stacks.get(stack_identifier)
self.assert_instance_count(stack, 5)
def test_update_group_replace(self):
"""Test case for ensuring non-updatable props case a replacement.
Make sure that during a group update the non-updatable
properties cause a replacement.
"""
files = {'provider.yaml': self.instance_template}
env = {'resource_registry':
{'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': '1',
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
files=files,
environment=env)
rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
orig_asg_id = rsrc.physical_resource_id
env2 = {'resource_registry':
{'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': '1',
'AZ': 'wibble',
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
self.update_stack(stack_identifier, self.template,
environment=env2, files=files)
# replacement will cause the resource physical_resource_id to change.
rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
self.assertNotEqual(orig_asg_id, rsrc.physical_resource_id)
def test_create_instance_error_causes_group_error(self):
"""Test create failing a resource in the instance group.
If a resource in an instance group fails to be created, the instance
group itself will fail and the broken inner resource will remain.
"""
stack_name = self._stack_rand_name()
files = {'provider.yaml': self.bad_instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
self.client.stacks.create(
stack_name=stack_name,
template=self.template,
files=files,
disable_rollback=True,
parameters={},
environment=env
)
self.addCleanup(self._stack_delete, stack_name)
stack = self.client.stacks.get(stack_name)
stack_identifier = '%s/%s' % (stack_name, stack.id)
self._wait_for_stack_status(stack_identifier, 'CREATE_FAILED')
initial_resources = {
'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'JobServerGroup')
self._assert_instance_state(nested_ident, 0, 2)
def test_update_instance_error_causes_group_error(self):
"""Test update failing a resource in the instance group.
If a resource in an instance group fails to be created during an
update, the instance group itself will fail and the broken inner
resource will remain.
"""
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
files=files,
environment=env)
initial_resources = {
'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
stack = self.client.stacks.get(stack_identifier)
self.assert_instance_count(stack, 2)
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'JobServerGroup')
self._assert_instance_state(nested_ident, 2, 0)
initial_list = [res.resource_name
for res in self.client.resources.list(nested_ident)]
env['parameters']['size'] = 3
files2 = {'provider.yaml': self.bad_instance_template}
self.client.stacks.update(
stack_id=stack_identifier,
template=self.template,
files=files2,
disable_rollback=True,
parameters={},
environment=env
)
self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')
# assert that there are 3 bad instances
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'JobServerGroup')
# 2 resources should be in update failed, and one create failed.
for res in self.client.resources.list(nested_ident):
if res.resource_name in initial_list:
self._wait_for_resource_status(nested_ident,
res.resource_name,
'UPDATE_FAILED')
else:
self._wait_for_resource_status(nested_ident,
res.resource_name,
'CREATE_FAILED')
def test_group_suspend_resume(self):
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 4,
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
files=files, environment=env)
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'JobServerGroup')
self.stack_suspend(stack_identifier)
self._wait_for_all_resource_status(nested_ident, 'SUSPEND_COMPLETE')
self.stack_resume(stack_identifier)
self._wait_for_all_resource_status(nested_ident, 'RESUME_COMPLETE')
class AutoscalingGroupUpdatePolicyTest(AutoscalingGroupTest):
def ig_tmpl_with_updt_policy(self):
templ = json.loads(copy.deepcopy(self.template))
up = {"AutoScalingRollingUpdate": {
"MinInstancesInService": "1",
"MaxBatchSize": "2",
"PauseTime": "PT1S"}}
templ['Resources']['JobServerGroup']['UpdatePolicy'] = up
return templ
def update_instance_group(self, updt_template,
num_updates_expected_on_updt,
num_creates_expected_on_updt,
num_deletes_expected_on_updt):
# setup stack from the initial template
files = {'provider.yaml': self.instance_template}
size = 10
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': size,
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
stack_name = self._stack_rand_name()
stack_identifier = self.stack_create(
stack_name=stack_name,
template=self.ig_tmpl_with_updt_policy(),
files=files,
environment=env)
stack = self.client.stacks.get(stack_identifier)
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'JobServerGroup')
# test that physical resource name of launch configuration is used
conf_name = self._stack_output(stack, 'JobServerConfigRef')
conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack_name
self.assertThat(conf_name,
matchers.MatchesRegex(conf_name_pattern))
# test the number of instances created
self.assert_instance_count(stack, size)
# saves info from initial list of instances for comparison later
init_instances = self.client.resources.list(nested_ident)
init_names = [inst.resource_name for inst in init_instances]
# test stack update
self.update_stack(stack_identifier, updt_template,
environment=env, files=files)
updt_stack = self.client.stacks.get(stack_identifier)
# test that the launch configuration is replaced
updt_conf_name = self._stack_output(updt_stack, 'JobServerConfigRef')
self.assertThat(updt_conf_name,
matchers.MatchesRegex(conf_name_pattern))
self.assertNotEqual(conf_name, updt_conf_name)
        # test that the group size is the same
updt_instances = self.client.resources.list(nested_ident)
updt_names = [inst.resource_name for inst in updt_instances]
self.assertEqual(len(init_names), len(updt_names))
for res in updt_instances:
self.assertEqual('UPDATE_COMPLETE', res.resource_status)
# test that the appropriate number of instance names are the same
matched_names = set(updt_names) & set(init_names)
self.assertEqual(num_updates_expected_on_updt, len(matched_names))
# test that the appropriate number of new instances are created
self.assertEqual(num_creates_expected_on_updt,
len(set(updt_names) - set(init_names)))
# test that the appropriate number of instances are deleted
self.assertEqual(num_deletes_expected_on_updt,
len(set(init_names) - set(updt_names)))
# test that the older instances are the ones being deleted
if num_deletes_expected_on_updt > 0:
            deletes_expected = init_names[:num_deletes_expected_on_updt]
            for name in deletes_expected:
                self.assertNotIn(name, updt_names)
def test_instance_group_update_replace(self):
"""Test simple update replace.
Test update replace with no conflict in batch size and minimum
instances in service.
"""
updt_template = self.ig_tmpl_with_updt_policy()
grp = updt_template['Resources']['JobServerGroup']
policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '1'
policy['MaxBatchSize'] = '3'
config = updt_template['Resources']['JobServerConfig']
config['Properties']['ImageId'] = self.conf.minimal_image_ref
self.update_instance_group(updt_template,
num_updates_expected_on_updt=10,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0)
def test_instance_group_update_replace_with_adjusted_capacity(self):
"""Test update replace with capacity adjustment.
Test update replace with capacity adjustment due to conflict in batch
size and minimum instances in service.
"""
updt_template = self.ig_tmpl_with_updt_policy()
grp = updt_template['Resources']['JobServerGroup']
policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '8'
policy['MaxBatchSize'] = '4'
config = updt_template['Resources']['JobServerConfig']
config['Properties']['ImageId'] = self.conf.minimal_image_ref
self.update_instance_group(updt_template,
num_updates_expected_on_updt=8,
num_creates_expected_on_updt=2,
num_deletes_expected_on_updt=2)
def test_instance_group_update_replace_huge_batch_size(self):
"""Test update replace with a huge batch size."""
updt_template = self.ig_tmpl_with_updt_policy()
group = updt_template['Resources']['JobServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '0'
policy['MaxBatchSize'] = '20'
config = updt_template['Resources']['JobServerConfig']
config['Properties']['ImageId'] = self.conf.minimal_image_ref
self.update_instance_group(updt_template,
num_updates_expected_on_updt=10,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0)
def test_instance_group_update_replace_huge_min_in_service(self):
"""Update replace with huge number of minimum instances in service."""
updt_template = self.ig_tmpl_with_updt_policy()
group = updt_template['Resources']['JobServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '20'
policy['MaxBatchSize'] = '1'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
config['Properties']['ImageId'] = self.conf.minimal_image_ref
self.update_instance_group(updt_template,
num_updates_expected_on_updt=9,
num_creates_expected_on_updt=1,
num_deletes_expected_on_updt=1)
def test_instance_group_update_no_replace(self):
"""Test simple update only and no replace.
Test simple update only and no replace (i.e. updated instance flavor
in Launch Configuration) with no conflict in batch size and
minimum instances in service.
"""
updt_template = self.ig_tmpl_with_updt_policy()
group = updt_template['Resources']['JobServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '1'
policy['MaxBatchSize'] = '3'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
config['Properties']['InstanceType'] = 'm1.tiny'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=10,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0)
def test_instance_group_update_no_replace_with_adjusted_capacity(self):
"""Test update only and no replace with capacity adjustment.
Test update only and no replace (i.e. updated instance flavor in
Launch Configuration) with capacity adjustment due to conflict in
batch size and minimum instances in service.
"""
updt_template = self.ig_tmpl_with_updt_policy()
group = updt_template['Resources']['JobServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '8'
policy['MaxBatchSize'] = '4'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
config['Properties']['InstanceType'] = 'm1.tiny'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=8,
num_creates_expected_on_updt=2,
num_deletes_expected_on_updt=2)
class AutoScalingSignalTest(AutoscalingGroupTest):
template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create multiple instances.",
"Parameters" : {"size": {"Type": "String", "Default": "1"},
"AZ": {"Type": "String", "Default": "nova"},
"image": {"Type": "String"},
"flavor": {"Type": "String"}},
"Resources": {
"custom_lb": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {"Ref": "image"},
"InstanceType": {"Ref": "flavor"},
"UserData": "foo",
"SecurityGroups": [ "sg-1" ],
"Tags": []
},
"Metadata": {
"IPs": {"Fn::GetAtt": ["JobServerGroup", "InstanceList"]}
}
},
"JobServerGroup": {
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : [{"Ref": "AZ"}],
"LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
"DesiredCapacity" : {"Ref": "size"},
"MinSize" : "0",
"MaxSize" : "20"
}
},
"JobServerConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Metadata": {"foo": "bar"},
"Properties": {
"ImageId" : {"Ref": "image"},
"InstanceType" : {"Ref": "flavor"},
"SecurityGroups" : [ "sg-1" ],
"UserData" : "jsconfig data"
}
},
"ScaleUpPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
"Cooldown" : "0",
"ScalingAdjustment": "1"
}
},
"ScaleDownPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
"Cooldown" : "0",
"ScalingAdjustment" : "-2"
}
}
},
"Outputs": {
"InstanceList": {"Value": {
"Fn::GetAtt": ["JobServerGroup", "InstanceList"]}}
}
}
'''
lb_template = '''
heat_template_version: 2013-05-23
parameters:
ImageId: {type: string}
InstanceType: {type: string}
SecurityGroups: {type: comma_delimited_list}
UserData: {type: string}
Tags: {type: comma_delimited_list, default: "x,y"}
resources:
outputs:
PublicIp: {value: "not-used"}
AvailabilityZone: {value: 'not-used1'}
PrivateDnsName: {value: 'not-used2'}
PublicDnsName: {value: 'not-used3'}
PrivateIp: {value: 'not-used4'}
'''
def setUp(self):
super(AutoScalingSignalTest, self).setUp()
self.build_timeout = self.conf.build_timeout
self.build_interval = self.conf.build_interval
self.files = {'provider.yaml': self.instance_template,
'lb.yaml': self.lb_template}
self.env = {'resource_registry':
{'resources':
{'custom_lb': {'AWS::EC2::Instance': 'lb.yaml'}},
'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
'image': self.conf.image_ref,
'flavor': self.conf.instance_type}}
def check_instance_count(self, stack_identifier, expected):
md = self.client.resources.metadata(stack_identifier, 'custom_lb')
actual_md = len(md['IPs'].split(','))
if actual_md != expected:
LOG.warning('check_instance_count exp:%d, meta:%s' % (expected,
md['IPs']))
return False
stack = self.client.stacks.get(stack_identifier)
inst_list = self._stack_output(stack, 'InstanceList')
actual = len(inst_list.split(','))
if actual != expected:
LOG.warning('check_instance_count exp:%d, act:%s' % (expected,
inst_list))
return actual == expected
def test_scaling_meta_update(self):
"""Use heatclient to signal the up and down policy.
Then confirm that the metadata in the custom_lb is updated each
time.
"""
stack_identifier = self.stack_create(template=self.template,
files=self.files,
environment=self.env)
self.assertTrue(test.call_until_true(
self.build_timeout, self.build_interval,
self.check_instance_count, stack_identifier, 2))
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'JobServerGroup')
# Scale up one, Trigger alarm
self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
self.assertTrue(test.call_until_true(
self.build_timeout, self.build_interval,
self.check_instance_count, stack_identifier, 3))
# Scale down two, Trigger alarm
self.client.resources.signal(stack_identifier, 'ScaleDownPolicy')
self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
self.assertTrue(test.call_until_true(
self.build_timeout, self.build_interval,
self.check_instance_count, stack_identifier, 1))
def test_signal_with_policy_update(self):
"""Prove that an updated policy is used in the next signal."""
stack_identifier = self.stack_create(template=self.template,
files=self.files,
environment=self.env)
self.assertTrue(test.call_until_true(
self.build_timeout, self.build_interval,
self.check_instance_count, stack_identifier, 2))
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'JobServerGroup')
# Scale up one, Trigger alarm
self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
self.assertTrue(test.call_until_true(
self.build_timeout, self.build_interval,
self.check_instance_count, stack_identifier, 3))
# increase the adjustment to "+2" and remove the DesiredCapacity
# so we don't go from 3 to 2.
new_template = self.template.replace(
'"ScalingAdjustment": "1"',
'"ScalingAdjustment": "2"').replace(
'"DesiredCapacity" : {"Ref": "size"},', '')
self.update_stack(stack_identifier, template=new_template,
environment=self.env, files=self.files)
# Scale up two, Trigger alarm
self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
self.assertTrue(test.call_until_true(
self.build_timeout, self.build_interval,
self.check_instance_count, stack_identifier, 5))
def test_signal_during_suspend(self):
"""Prove that a signal will fail when the stack is in suspend."""
stack_identifier = self.stack_create(template=self.template,
files=self.files,
environment=self.env)
self.assertTrue(test.call_until_true(
self.build_timeout, self.build_interval,
self.check_instance_count, stack_identifier, 2))
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'JobServerGroup')
# suspend the top level stack.
self.client.actions.suspend(stack_id=stack_identifier)
# Wait for stack to reach SUSPEND_COMPLETE
self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
# Send a signal and an exception will raise
ex = self.assertRaises(exc.BadRequest,
self.client.resources.signal,
stack_identifier, 'ScaleUpPolicy')
error_msg = 'Signal resource during SUSPEND is not supported'
self.assertIn(error_msg, six.text_type(ex))
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='Cannot signal resource during SUSPEND',
rsrc_name='ScaleUpPolicy')
self.assertEqual('SUSPEND_COMPLETE', ev[0].resource_status)
# still SUSPEND_COMPLETE (not gone to UPDATE_COMPLETE)
self._wait_for_stack_status(nested_ident, 'SUSPEND_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
# still 2 instances.
self.assertTrue(test.call_until_true(
self.build_timeout, self.build_interval,
self.check_instance_count, stack_identifier, 2))
|
|
#!/usr/bin/env python2.7
# =============================================================================
# IMPORTS
# =============================================================================
import ConfigParser
import MySQLdb
import praw
from praw.errors import APIException, RateLimitExceeded
import re
from requests.exceptions import HTTPError, ConnectionError, Timeout
from socket import timeout
import time
from enum import Enum
import nltk
from threading import Thread
# =============================================================================
# GLOBALS
# =============================================================================
config = ConfigParser.ConfigParser()
config.read("asoiafsearchbot.cfg")
# Database info
host = config.get("SQL", "host")
user = config.get("SQL", "user")
passwd = config.get("SQL", "passwd")
db = config.get("SQL", "db")
table = config.get("SQL", "table")
MAX_ROWS = 30
BOOK_CONTAINER = []
sent_tokenize = nltk.data.load('tokenizers/punkt/english.pickle')
# Reddit Info
user_agent = (
"ASOIAFSearchBot -Help you find that comment"
"- by /u/RemindMeBotWrangler")
reddit = praw.Reddit(user_agent = user_agent)
reddit_user = config.get("Reddit", "username")
reddit_pass = config.get("Reddit", "password")
reddit.login(reddit_user, reddit_pass)
# =============================================================================
# CLASSES
# =============================================================================
class Connect(object):
"""
DB connection class
"""
connection = None
cursor = None
def __init__(self):
self.connection = MySQLdb.connect(
host=host, user=user, passwd=passwd, db=db
)
self.cursor = self.connection.cursor()
def execute(self, command):
self.cursor.execute(command)
def fetchall(self):
return self.cursor.fetchall()
def commit(self):
self.connection.commit()
def close(self):
self.connection.close()
class Title(Enum):
All = 0
AGOT = 1
ACOK = 2
ASOS = 3
AFFC = 4
ADWD = 5
DE = 6
PQ = 7
class Books(object):
"""
Book class, holds methods to find the correct occurrence
of the given search term in each chapter.
"""
# IDs of comments already replied to; appended to avoid replying twice
commented = []
# already searched terms
# TODO: implement this caching functionality
termHistory = {}
termHistorySensitive = {}
def __init__(self, comment):
self.comment = comment
self.bookCommand = None
self.title = None
self._chapterPovMessage = None
self._bookContainer = None
self._searchTerm = ""
self._bookQuery = ""
self._sensitive = None
self._listOccurrence = []
self._rowOccurrence = 0
self._total = 0
self._rowCount = 0
self._commentUser = ""
self._message = ""
self._links = (
"\n\n**Try the practice thread to reduce spam and keep the current thread on topic.**\n\n"
"\n_____\n"
"[^([More Info Here])]"
"(http://www.reddit.com/r/asoiaf/comments/25amke/"
"spoilers_all_introducing_asoiafsearchbot_command/) ^| "
"[^([Practice Thread])]"
"(http://www.reddit.com/r/asoiaf/comments/26ez9u/"
"spoilers_all_asoiafsearchbot_practice_thread/) ^| "
"[^([Character Specific Commands])]"
"(http://www.reddit.com/r/asoiaf/comments/26ez9u/"
"spoilers_all_asoiafsearchbot_practice_thread/) ^| "
"[^([Suggestions])]"
"(http://www.reddit.com/message/compose/?to=RemindMeBotWrangler&subject=Suggestion) ^| "
"[^([Code])]"
"(https://github.com/SIlver--/asoiafsearchbot-reddit)"
)
def parse_comment(self):
"""
Changes user comment from:
Lorem ipsum dolor sit amet, consectetur adipiscing elit.
ullam laoreet volutpat accumsan.
SearchAll! "SEARCH TERM"
into finally just:
SEARCH TERM
"""
# Removes everything up to and including the Search<BOOK>! command
self._searchTerm = ''.join(re.split(
r'Search(All|AGOT|ACOK|ASOS|AFFC|ADWD|DE|PQ)!',
self.comment.body)[2:]
)
# checks to see if user wants a character chapter only
searchSqrBrackets = re.search('\[(.*?)\]', self._searchTerm)
if searchSqrBrackets:
chapterPov = searchSqrBrackets.group(0)
self.which_pov(chapterPov)
# Kept for legacy reasons
searchBrackets = re.search('"(.*?)"', self._searchTerm)
if searchBrackets:
self._searchTerm = searchBrackets.group(0)
searchTri = re.search('\((.*?)\)', self._searchTerm)
if searchTri:
self._searchTerm = searchTri.group(0)
# quotations at start and end
if searchBrackets or searchTri:
self._searchTerm = self._searchTerm[1:-1]
# legacy: as the user doesn't need to do "" or ()
self._searchTerm = self._searchTerm.strip()
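# Illustrative walk-through (not from the original source): for a comment
# body like 'SearchASOS! "Hodor" [Bran]' the split above leaves
# ' "Hodor" [Bran]'; which_pov() consumes '[Bran]' to restrict the query
# to Bran Stark chapters, and the quoted group reduces the search term to
# just: Hodor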
def from_database_to_dict(self):
"""
Transfers everything from the database to a tuple type
"""
# in case of quotations
grabDB = Connect()
query = (
'SELECT * from {table} {bookQuery}'
'ORDER BY FIELD '
'({col2}, "AGOT", "ACOK", "ASOS", "AFFC", "ADWD", "DE", "PQ")'
).format(
table = table,
bookQuery = self._bookQuery,
col1 = "story",
col2 = "book")
print "----", query
grabDB.execute(query)
# Each row counts as a chapter
self._bookContainer = grabDB.fetchall()
grabDB.close()
def find_the_search_term(self,rowsTooLong=None):
"""
Search through the books which chapter holds the search term.
"""
# Convert the tuple to a list so it can be modified
holdList = list(self._bookContainer)
for i in range(len(holdList)):
# checks if the word is in that chapter
foundTerm = re.findall(r"\b" + re.escape(self._searchTerm) +
r"\b", holdList[i][5], flags=re.IGNORECASE)
# count the occurrence
storyLen = len(foundTerm)
holdList[i] += (storyLen,)
if foundTerm:
self._total += storyLen
self._rowCount += 1
holdList[i] += (self.sentences_to_quote(holdList[i][5]), )
self.append_to_list(holdList[i])
# Called when the row count ends up above MAX_ROWS
if self._rowCount > MAX_ROWS:
holdList = self.rows_too_long(holdList)
self._listOccurrence[:] = []
for i in range(len(holdList)):
self.append_to_list(holdList[i])
def append_to_list(self, holdList):
# Stores each found word as a list of strings
self._listOccurrence.append(
"| {series}| {book}| {number}| {chapter}| {pov}| {occur}| {quote}".format(
series = holdList[0],
book = holdList[1],
number = holdList[2],
chapter = holdList[3],
pov = holdList[4],
occur = holdList[6],
quote = holdList[7]
)
)
def rows_too_long(self,holdList):
"""
Sorts the rows to only show top 30 results
remove after 30
"""
holdList = sorted(holdList, key=lambda tup: tup[6], reverse=True)
holdList[MAX_ROWS:] = []
return holdList
def sentences_to_quote(self, chapter):
"""
Separates the chapter into sentences
Returns the first occurrence of the word in the sentence
"""
# Separate the chapter into sentences
searchSentences = sent_tokenize.tokenize(chapter, realign_boundaries=True)
findIt = r"\b" + re.escape(self._searchTerm) + r"\b"
for word in searchSentences:
regex = (re.sub(findIt,
"**" + self._searchTerm.upper() + "**",
word, flags=re.IGNORECASE))
if regex != word:
return regex
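# Illustrative example (not from the original source): with the search term
# 'Hodor', a sentence such as 'Hodor held the door.' is returned as
# '**HODOR** held the door.' -- the first sentence containing a match wins.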
def which_book(self):
"""
self.title holds the farthest book in the series that the
SQL statement should include. So if the title is ASOS, only
occurrences up to and including ASOS are searched (SearchAll! only).
"""
"""
# If the user didn't include a characterPOV, add the WHERE
# If they do(It's not a NONE), add AND
if self._bookQuery == "":
#self._bookQuery = "WHERE "
#else:
self._bookQuery += "AND "
"""
# When command is SearchAll! the specific searches
# will instead be used. example SearchASOS!
if self.bookCommand.name != 'All':
self._bookQuery += ('WHERE {col2} = "{book}" '
).format(col2 = "book",
book = self.bookCommand.name)
# Starts from AGOT ends at what self.title is
# Not needed for All(0) because the SQL does it by default
elif self.title.value != 0:
# Open the WHERE clause with AGOT; the remaining books are ORed in below
self._bookQuery += ('WHERE ({col2} = "{book}" '
).format(col2 = "book",
book = 'AGOT')
# start the loop after AGOT
for x in range(2, self.title.value+1):
# assign current loop the name of the enum's value
curBook = Title(x).name
# Shouldn't add ORs if it's AGOT
# and shouldn't add D&E and P&Q
if Title(x) != Title.AGOT:
self._bookQuery += ('OR {col2} = "{book}" '
).format(col2 = "book",
book = curBook)
self._bookQuery += ")" # close the WHERE in the MSQL
def which_pov(self,chapterPov):
"""
Allows the user to search specific character chapters only
"""
chapterPovName = None
if chapterPov == "[Aeron]":
chapterPovName = "Aeron Greyjoy"
if chapterPov == "[Areo]":
chapterPovName = "Areo Hotah"
if chapterPov == "[Arianne]":
chapterPovName = "Arianne Martell"
if chapterPov == "[Arya]":
chapterPovName = "Arya Stark"
if chapterPov == "[Asha]":
chapterPovName = "Asha Greyjoy"
if chapterPov == "[Barristan]":
chapterPovName = "Barristan Selmy"
if chapterPov == "[Bran]":
chapterPovName = "Bran Stark"
if chapterPov == "[Brienne]":
chapterPovName = "Brienne of Tarth"
if chapterPov == "[Cat]":
chapterPovName = "Catelyn Tully"
if chapterPov == "[Cersei]":
chapterPovName = "Cersei Lannister"
if chapterPov == "[Dany]":
chapterPovName = "Daenerys Targaryen"
if chapterPov == "[Davos]":
chapterPovName = "Davos Seaworth"
if chapterPov == "[Ned]":
chapterPovName = "Eddard Stark"
if chapterPov == "[Jaime]":
chapterPovName = "Jaime Lannister"
if chapterPov == "[JonCon]":
chapterPovName = "Jon Connington"
if chapterPov == "[Jon]":
chapterPovName = "Jon Snow"
if chapterPov == "[Melisandre]":
chapterPovName = "Melisandre"
if chapterPov == "[Quentyn]":
chapterPovName = "Quentyn Martell"
if chapterPov == "[Samwell]":
chapterPovName = "Samwell Tarly"
if chapterPov == "[Sansa]":
chapterPovName = "Sansa Stark"
if chapterPov == "[Theon]":
chapterPovName = "Theon Greyjoy"
if chapterPov == "[Tyrion]":
chapterPovName = "Tyrion Lannister"
if chapterPov == "[Victarion]":
chapterPovName = "Victarion Greyjoy"
# if no match is found, the user entered an incorrect character command
if chapterPovName != None:
# If the user didn't include a book, add the WHERE
# If they do add AND
if self._bookQuery == "":
self._bookQuery = "WHERE "
else:
self._bookQuery += "AND "
self._bookQuery += ('chapterpov = "{charactername}" ').format(
charactername = chapterPovName,
)
self._chapterPovMessage = ("**ONLY** for **{character}** chapters.\n\n").format(
character = chapterPovName,
)
else:
self._chapterPovMessage = ("Note: Looks like you didn't enter in a correct character name. "
"Searching all characters instead. "
"Refer to this {thread} for correct command usage.\n\n").format(
thread = "[Thread](Linkhere.com)"
)
def build_message(self):
"""
Build message that will be sent to the reddit user
"""
commentUser = (
"**SEARCH TERM: {term}**\n\n"
"Total Occurrence: {totalOccur} \n\n"
"Total Chapters: {totalChapter} \n\n"
"{warning}"
"{chapterpov}"
"######	\n\n####	\n\n#####	\n\n"
"	\n\n	\n\n"
">{message}"
"{link}"
)
warning = ""
if self.title.name != 'All' and self.title.name != 'PQ' and self.title.name != 'DE':
warning = ("**ONLY** for **{book}** and under due to the spoiler tag in the title.\n\n").format(
book = self.title.name,
)
if self._rowCount > MAX_ROWS:
warning += ("Excess number of chapters. Sorted by highest to lowest, top 30 results only.\n\n")
# Avoids spam and builds table heading only when condition is met
if self._total > 0:
self._message += (
"| Series| Book| Chapter| Chapter Name| Chapter POV| Occurrence| Quote^(First Occurrence Only)\n"
)
self._message += "|:{dash}|:{dash}|:{dash}|:{dash}|:{dash}|:{dash}|:{dash}|\n".format(dash='-' * 11)
# Each element added as a new row with new line
for row in self._listOccurrence:
self._message += row + "\n"
elif self._total == 0:
self._message = "**Sorry no results.**\n\n"
self._commentUser = commentUser.format(
warning = warning,
chapterpov = self._chapterPovMessage,
term = self._searchTerm,
totalOccur = self._total,
message = self._message,
link = self._links,
totalChapter = self._rowCount
)
def reply(self, spoiler=False):
"""
Reply to reddit user. If the search would be a spoiler
Send different message.
"""
try:
if spoiler:
self._commentUser = (
">**Sorry, fulfilling this request would be a spoiler due to the spoiler tag in this thread. "
"Mayhaps try the request in another thread, heh.**\n\n"
"{link}"
).format(link = self._links)
print self._commentUser
#self.comment.reply(self._commentUser)
except (HTTPError, ConnectionError, Timeout, timeout) as err:
print err
except RateLimitExceeded as err:
print err
time.sleep(10)
except APIException as err: # Catch any less specific API errors
print err
else:
self.commented.append(self.comment.id)
def watch_for_spoilers(self):
"""
Decides the scope of spoilers based on the thread title.
This means that SearchADWD! shouldn't be used in a (Spoilers AGOT) thread.
"""
# loop formats each name into the regex
# then checks regex against the title
# number used for which_book() loop
for name, member in Title.__members__.items():
# Remove the first letter to also match variants like GOT
regex = ("(\(|\[).*({name}|{nameRemove}).*(\)|\])"
).format(name = name.lower(), nameRemove = name[1:].lower())
if (re.search(regex, self.comment.link_title.lower()) and
# stops false positives as their regex is more complex
name != 'DE' and name != 'PQ'):
self.title = member
break
# these titles are not in the Title Enum but follow the same guidelines
if re.search ("(\(|\[).*(published|twow).*(\)|\])"
, self.comment.link_title.lower()):
self.title = Title.All
# TODO: Fix when new books are added to the database
if re.search ("(\(|\[).*(d&e|d & e|dunk.*egg).*(\)|\])", self.comment.link_title.lower()):
self.title = Title.DE
if re.search ("(\(|\[).*(p\s?\&\s?q).*(\)|\])", self.comment.link_title.lower()):
self.title = Title.PQ
# Decides which book the user picked based on the command.
# SearchAGOT! to SearchADWD!
for name, member in Title.__members__.items():
search = ("Search{name}!").format(name = name)
if search in self.comment.body:
self.bookCommand = member
# accounts for /r/pureasoiaf
if str(self.comment.subreddit) == "pureasoiaf":
sub = reddit.get_submission(self.comment.permalink)
if sub.link_flair_text == 'Spoilers Default':
self.title = Title.All
if sub.link_flair_text == 'No Spoilers':
self.title = None  # no matching Title member; treated as not searchable
def run(self):
self.parse_comment()
self.from_database_to_dict()
self.find_the_search_term()
self.build_message()
self.reply()
# =============================================================================
# MAIN
# =============================================================================
def main():
"""Main runner"""
while True:
print "start"
try:
comments = praw.helpers.comment_stream(
reddit, 'pureasoiaf+asoiaf', limit = 50, verbosity = 0)
commentCount = 0
for comment in comments:
commentCount += 1
# Create a Books instance for this comment
allBooks = Books(comment)
if re.search('Search(All|AGOT|ACOK|ASOS|AFFC|ADWD|DE|PQ)!', comment.body):
allBooks.watch_for_spoilers()
# Note: the comparison with None needs to be explicit because
# Title.All evaluates as falsy (its value is 0)
if allBooks.title != None:
allBooks.which_book()
# Don't respond to the comment a second time
if allBooks.comment.id not in allBooks.commented:
# skip when the Search<BOOK>! command goes further than the (Spoiler) tag allows
if (allBooks.bookCommand.value <= allBooks.title.value or
allBooks.title.value == 0):
t = Thread(target=allBooks.run)
t.start()
elif allBooks.comment.id not in allBooks.commented:
allBooks.reply(spoiler=True)
elif allBooks.comment.id not in allBooks.commented:
# Send the appropriate message if it's a spoiler
allBooks.reply(spoiler=True)
if commentCount == 200:
break
print "sleeping"
time.sleep(25)
except Exception as err:
print err
# =============================================================================
# RUNNER
# =============================================================================
if __name__ == '__main__':
main()
|
|
import math
import mathutils
import bpy
from bpy import data, context, types
from bpy_extras.io_utils import axis_conversion
from .. import constants, logger, utilities, exceptions
from .constants import (
MESH,
EMPTY,
ARMATURE,
LAMP,
SPOT,
SUN,
POINT,
HEMI,
CAMERA,
PERSP,
ORTHO,
RENDER,
NO_SHADOW,
ZYX
)
# Blender doesn't seem to have a good way to link a mesh back to the
# objects that are instancing it, or it is bloody obvious and I haven't
# discovered it yet. This manifest serves as a way to map a mesh
# node to the object nodes that are using it.
_MESH_MAP = {}
def _object(func):
"""
:param func:
"""
def inner(arg, *args, **kwargs):
"""
:param arg:
:param *args:
:param **kwargs:
"""
if isinstance(arg, types.Object):
obj = arg
else:
obj = data.objects[arg]
return func(obj, *args, **kwargs)
return inner
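# Minimal usage sketch (illustrative; 'Cube' is a hypothetical object name):
# any function decorated with @_object, such as name() below, accepts either
# a bpy object or its name, so name(data.objects['Cube']) and name('Cube')
# resolve to the same object before the wrapped function runs.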
def clear_mesh_map():
"""Clears the mesh map, required on initialization"""
_MESH_MAP.clear()
def assemblies(valid_types, options):
"""
:param valid_types:
:param options:
"""
logger.debug('object.assemblies(%s)', valid_types)
for obj in data.objects:
# rigged assets are parented under armature nodes
if obj.parent and obj.parent.type != ARMATURE:
continue
if obj.parent and obj.parent.type == ARMATURE:
logger.info('Has armature parent %s', obj.name)
if _valid_node(obj, valid_types, options):
yield obj.name
@_object
def cast_shadow(obj):
"""
:param obj:
"""
logger.debug('object.cast_shadow(%s)', obj)
if obj.type == LAMP:
if obj.data.type in (SPOT, SUN):
ret = obj.data.shadow_method != NO_SHADOW
else:
logger.info('%s is a lamp but this lamp type does not '\
'have supported shadows in ThreeJS', obj.name)
ret = None
return ret
elif obj.type == MESH:
mat = material(obj)
if mat:
return data.materials[mat].use_cast_shadows
else:
return False
@_object
def children(obj, valid_types):
"""
:param obj:
:param valid_types:
"""
logger.debug('object.children(%s, %s)', obj, valid_types)
for child in obj.children:
if child.type in valid_types:
yield child.name
@_object
def material(obj):
"""
:param obj:
"""
logger.debug('object.material(%s)', obj)
try:
return obj.material_slots[0].name
except IndexError:
pass
def extract_time(fcurves, start_index):
time = []
for xx in fcurves[start_index].keyframe_points:
time.append(xx.co.x)
return time
def merge_sorted_lists(l1, l2):
sorted_list = []
l1 = l1[:]
l2 = l2[:]
while (l1 and l2):
h1 = l1[0]
h2 = l2[0]
if h1 == h2:
sorted_list.append(h1)
l1.pop(0)
l2.pop(0)
elif h1 < h2:
l1.pop(0)
sorted_list.append(h1)
else:
l2.pop(0)
sorted_list.append(h2)
# Add the remaining of the lists
sorted_list.extend(l1 if l1 else l2)
return sorted_list
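# Illustrative example (not from the original source): values present in both
# inputs are collapsed to a single entry, so
#   merge_sorted_lists([1, 3, 5], [2, 3, 6]) == [1, 2, 3, 5, 6]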
def appendVec3(track, time, vec3):
track.append({ "time": time, "value": [ vec3.x, vec3.y, vec3.z ] })
def appendQuat(track, time, quat):
track.append({ "time": time, "value": [ quat.x, quat.y, quat.z, quat.w ] })
# trackable transform fields: ( <output field>, <number of fcurves>, <track type> )
TRACKABLE_FIELDS = {
"location": ( ".position", 3, "vector3" ),
"scale": ( ".scale", 3, "vector3" ),
"rotation_euler": ( ".rotation", 3, "vector3" ),
"rotation_quaternion": ( ".quaternion", 4, "quaternion" )
}
EXPORTED_TRACKABLE_FIELDS = [ "location", "scale", "rotation_quaternion" ]
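# Sketch of the track layout produced by animated_xform() below (shown only
# for orientation; 'Cube' is a hypothetical object name):
#   { constants.NAME: 'Cube.position',
#     constants.TYPE: 'vector3',
#     constants.KEYS: [ { "time": 1.0, "value": [x, y, z] }, ... ] }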
@_object
def animated_xform(obj, options):
if obj.animation_data is None:
return []
fcurves = obj.animation_data
if not fcurves:
return []
if fcurves.action is None:
return []
fcurves = fcurves.action.fcurves
objName = obj.name
tracks = []
i = 0
nb_curves = len(fcurves)
# extract unique frames
times = None
while i < nb_curves:
field_info = TRACKABLE_FIELDS.get(fcurves[i].data_path)
if field_info:
newTimes = extract_time(fcurves, i)
times = merge_sorted_lists(times, newTimes) if times else newTimes # merge list
i += field_info[1]
else:
i += 1
# init tracks
track_loc = []
for fld in EXPORTED_TRACKABLE_FIELDS:
field_info = TRACKABLE_FIELDS[fld]
track = []
track_loc.append(track)
tracks.append({
constants.NAME: objName+field_info[0],
constants.TYPE: field_info[2],
constants.KEYS: track
})
# track arrays
track_sca = track_loc[1]
track_qua = track_loc[2]
track_loc = track_loc[0]
use_inverted = options.get(constants.HIERARCHY, False) and obj.parent
if times is None:
logger.info("In animated xform: Unable to extract trackable fields from %s", objName)
return tracks
# for each frame
inverted_fallback = mathutils.Matrix() if use_inverted else None
convert_matrix = AXIS_CONVERSION # matrix to convert the exported matrix
original_frame = context.scene.frame_current
for time in times:
context.scene.frame_set(time, 0.0)
if use_inverted:  # need the inverted parent matrix; it might have changed since the last frame
convert_matrix = obj.parent.matrix_world.inverted(inverted_fallback)
wm = convert_matrix * obj.matrix_world
appendVec3(track_loc, time, wm.to_translation())
appendVec3(track_sca, time, wm.to_scale() )
appendQuat(track_qua, time, wm.to_quaternion() )
context.scene.frame_set(original_frame, 0.0) # restore to original frame
# TODO: remove duplicated key frames
return tracks
@_object
def custom_properties(obj):
"""
:param obj:
"""
logger.debug('object.custom_properties(%s)', obj)
# Grab any properties except those marked private (by underscore
# prefix) or those with types that would be rejected by the JSON
# serializer object model.
return {kvp[0]: kvp[1] for kvp in obj.data.items() if kvp[0][:1] != '_' and isinstance(kvp[1], constants.VALID_DATA_TYPES)}
@_object
def mesh(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.mesh(%s, %s)', obj, options)
if obj.type != MESH:
return
for mesh_, objects in _MESH_MAP.items():
if obj in objects:
return mesh_
else:
logger.debug('Could not map object, updating manifest')
mesh_ = extract_mesh(obj, options)
if len(mesh_.tessfaces) != 0:
manifest = _MESH_MAP.setdefault(mesh_.name, [])
manifest.append(obj)
mesh_name = mesh_.name
else:
# possibly just being used as a controller
logger.info('Object %s has no faces', obj.name)
mesh_name = None
return mesh_name
@_object
def name(obj):
"""
:param obj:
"""
return obj.name
@_object
def node_type(obj):
"""
:param obj:
"""
logger.debug('object.node_type(%s)', obj)
# standard transformation nodes are inferred
if obj.type == MESH:
return constants.MESH.title()
elif obj.type == EMPTY:
return constants.OBJECT.title()
dispatch = {
LAMP: {
POINT: constants.POINT_LIGHT,
SUN: constants.DIRECTIONAL_LIGHT,
SPOT: constants.SPOT_LIGHT,
HEMI: constants.HEMISPHERE_LIGHT
},
CAMERA: {
PERSP: constants.PERSPECTIVE_CAMERA,
ORTHO: constants.ORTHOGRAPHIC_CAMERA
}
}
try:
return dispatch[obj.type][obj.data.type]
except AttributeError:
msg = 'Invalid type: %s' % obj.type
raise exceptions.UnsupportedObjectType(msg)
def nodes(valid_types, options):
"""
:param valid_types:
:param options:
"""
for obj in data.objects:
if _valid_node(obj, valid_types, options):
yield obj.name
@_object
def position(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.position(%s)', obj)
vector = matrix(obj, options).to_translation()
return (vector.x, vector.y, vector.z)
@_object
def receive_shadow(obj):
"""
:param obj:
"""
if obj.type == MESH:
mat = material(obj)
if mat:
return data.materials[mat].use_shadows
else:
return False
AXIS_CONVERSION = axis_conversion(to_forward='Z', to_up='Y').to_4x4()
@_object
def matrix(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.matrix(%s)', obj)
if options.get(constants.HIERARCHY, False) and obj.parent:
parent_inverted = obj.parent.matrix_world.inverted(mathutils.Matrix())
return parent_inverted * obj.matrix_world
else:
return AXIS_CONVERSION * obj.matrix_world
@_object
def rotation(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.rotation(%s)', obj)
vector = matrix(obj, options).to_euler(ZYX)
return (vector.x, vector.y, vector.z)
@_object
def scale(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.scale(%s)', obj)
vector = matrix(obj, options).to_scale()
return (vector.x, vector.y, vector.z)
@_object
def select(obj):
"""
:param obj:
"""
obj.select = True
@_object
def unselect(obj):
"""
:param obj:
"""
obj.select = False
@_object
def visible(obj):
"""
:param obj:
"""
logger.debug('object.visible(%s)', obj)
return obj.is_visible(context.scene)
def extract_mesh(obj, options, recalculate=False):
"""
:param obj:
:param options:
:param recalculate: (Default value = False)
"""
logger.debug('object.extract_mesh(%s, %s)', obj, options)
apply_modifiers = options.get(constants.APPLY_MODIFIERS, True)
if apply_modifiers:
bpy.ops.object.mode_set(mode='OBJECT')
mesh_node = obj.to_mesh(context.scene, apply_modifiers, RENDER)
# transfer the geometry type to the extracted mesh
mesh_node.THREE_geometry_type = obj.data.THREE_geometry_type
# now determine whether or not to export using the geometry type
# set globally from the exporter's options or to use the local
# override on the mesh node itself
opt_buffer = options.get(constants.GEOMETRY_TYPE)
opt_buffer = opt_buffer == constants.BUFFER_GEOMETRY
prop_buffer = mesh_node.THREE_geometry_type == constants.BUFFER_GEOMETRY
bpy.context.scene.objects.active = obj
# if doing buffer geometry it is imperative to triangulate the mesh
if opt_buffer or prop_buffer:
original_mesh = obj.data
obj.data = mesh_node
logger.debug('swapped %s for %s',
original_mesh.name,
mesh_node.name)
hidden_state = obj.hide
obj.hide = False
bpy.ops.object.mode_set(mode='OBJECT')
obj.select = True
bpy.context.scene.objects.active = obj
logger.info('Applying triangulation to %s', obj.data.name)
bpy.ops.object.modifier_add(type='TRIANGULATE')
bpy.ops.object.modifier_apply(apply_as='DATA',
modifier='Triangulate')
obj.data = original_mesh
obj.select = False
obj.hide = hidden_state
# split sharp edges
original_mesh = obj.data
obj.data = mesh_node
obj.select = True
bpy.ops.object.modifier_add(type='EDGE_SPLIT')
bpy.context.object.modifiers['EdgeSplit'].use_edge_angle = False
bpy.context.object.modifiers['EdgeSplit'].use_edge_sharp = True
bpy.ops.object.modifier_apply(apply_as='DATA', modifier='EdgeSplit')
obj.select = False
obj.data = original_mesh
# recalculate the normals to face outwards, this is usually
# best after applying modifiers, especially for something
# like the mirror
if recalculate:
logger.info('Recalculating normals')
original_mesh = obj.data
obj.data = mesh_node
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent()
bpy.ops.object.editmode_toggle()
obj.data = original_mesh
if not options.get(constants.SCENE):
xrot = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
mesh_node.transform(xrot * obj.matrix_world)
# blend shapes
if options.get(constants.BLEND_SHAPES) and not options.get(constants.MORPH_TARGETS):
original_mesh = obj.data
if original_mesh.shape_keys:
logger.info('Using blend shapes')
obj.data = mesh_node # swap to be able to add the shape keys
shp = original_mesh.shape_keys
animCurves = shp.animation_data
if animCurves:
animCurves = animCurves.action.fcurves
src_kbs = shp.key_blocks
for key in src_kbs.keys():
logger.info("-- Parsing key %s", key)
obj.shape_key_add(name=key, from_mix=False)
src_kb = src_kbs[key].data
if key == 'Basis':
dst_kb = mesh_node.vertices
else:
dst_kb = mesh_node.shape_keys.key_blocks[key].data
for idx in range(len(src_kb)):
dst_kb[idx].co = src_kb[idx].co
if animCurves:
data_path = 'key_blocks["'+key+'"].value'
for fcurve in animCurves:
if fcurve.data_path == data_path:
dst_kb = mesh_node.shape_keys.key_blocks[key]
for xx in fcurve.keyframe_points:
dst_kb.value = xx.co.y
dst_kb.keyframe_insert("value",frame=xx.co.x)
pass
break # no need to continue to loop
pass
obj.data = original_mesh
# now generate a unique name
index = 0
while True:
if index == 0:
mesh_name = '%sGeometry' % obj.data.name
else:
mesh_name = '%sGeometry.%d' % (obj.data.name, index)
try:
data.meshes[mesh_name]
index += 1
except KeyError:
break
mesh_node.name = mesh_name
mesh_node.update(calc_tessface=True)
mesh_node.calc_normals()
mesh_node.calc_tessface()
scale_ = options.get(constants.SCALE, 1)
mesh_node.transform(mathutils.Matrix.Scale(scale_, 4))
return mesh_node
def objects_using_mesh(mesh_node):
"""
:param mesh_node:
:return: list of object names
"""
logger.debug('object.objects_using_mesh(%s)', mesh_node)
for mesh_name, objects in _MESH_MAP.items():
if mesh_name == mesh_node.name:
return objects
else:
logger.warning('Could not find mesh mapping')
def prep_meshes(options):
"""Prep the mesh nodes. Preperation includes identifying:
- nodes that are on visible layers
- nodes that have export disabled
- nodes that have modifiers that need to be applied
:param options:
"""
logger.debug('object.prep_meshes(%s)', options)
mapping = {}
visible_layers = _visible_scene_layers()
for obj in data.objects:
if obj.type != MESH:
continue
# this is ideal for skipping controller or proxy nodes
# that may apply to a Blender scene but not a Three.js scene
if not _on_visible_layer(obj, visible_layers):
logger.info('%s is not on a visible layer', obj.name)
continue
# if someone really insists on a visible node not being exportable
if not obj.THREE_export:
logger.info('%s export is disabled', obj.name)
continue
# need to apply modifiers before moving on, and before
# handling instancing. It is possible for 2 or more objects to
# instance the same mesh without all of them using the same modifiers.
# This logic identifies objects with modifiers and extracts
# the mesh, making the mesh unique to that particular object
if len(obj.modifiers):
logger.info('%s has modifiers', obj.name)
mesh_node = extract_mesh(obj, options, recalculate=True)
_MESH_MAP[mesh_node.name] = [obj]
continue
logger.info('adding mesh %s.%s to prep',
obj.name, obj.data.name)
manifest = mapping.setdefault(obj.data.name, [])
manifest.append(obj)
# now associate the extracted mesh node with all the objects
# that are instancing it
for objects in mapping.values():
mesh_node = extract_mesh(objects[0], options)
_MESH_MAP[mesh_node.name] = objects
def extracted_meshes():
"""
:return: names of extracted mesh nodes
"""
logger.debug('object.extracted_meshes()')
return [key for key in _MESH_MAP.keys()]
def _on_visible_layer(obj, visible_layers):
"""
:param obj:
:param visible_layers:
"""
is_visible = False
for index, layer in enumerate(obj.layers):
if layer and index in visible_layers:
is_visible = True
break
if not is_visible:
logger.info('%s is on a hidden layer', obj.name)
return is_visible
def _visible_scene_layers():
"""
:return: list of visible layer indices
"""
visible_layers = []
for index, layer in enumerate(context.scene.layers):
if layer:
visible_layers.append(index)
return visible_layers
def _valid_node(obj, valid_types, options):
"""
:param obj:
:param valid_types:
:param options:
"""
if obj.type not in valid_types:
return False
# skip objects that are not on visible layers
visible_layers = _visible_scene_layers()
if not _on_visible_layer(obj, visible_layers):
return False
try:
export = obj.THREE_export
except AttributeError:
export = True
if not export:
return False
mesh_node = mesh(obj, options)
is_mesh = obj.type == MESH
# skip objects for which a mesh could not be resolved
if is_mesh and not mesh_node:
return False
# secondary test; if a mesh node was resolved but no
# faces are detected then bow out
if is_mesh:
mesh_node = data.meshes[mesh_node]
if len(mesh_node.tessfaces) == 0:
return False
# if we get this far assume that the mesh is valid
return True
|
|
"""Provide a way to connect entities belonging to one device."""
from collections import OrderedDict
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union
import attr
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import Event, callback
import homeassistant.util.uuid as uuid_util
from .debounce import Debouncer
from .singleton import singleton
from .typing import HomeAssistantType
if TYPE_CHECKING:
from . import entity_registry
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_UNDEF = object()
DATA_REGISTRY = "device_registry"
EVENT_DEVICE_REGISTRY_UPDATED = "device_registry_updated"
STORAGE_KEY = "core.device_registry"
STORAGE_VERSION = 1
SAVE_DELAY = 10
CLEANUP_DELAY = 10
CONNECTION_NETWORK_MAC = "mac"
CONNECTION_UPNP = "upnp"
CONNECTION_ZIGBEE = "zigbee"
IDX_CONNECTIONS = "connections"
IDX_IDENTIFIERS = "identifiers"
REGISTERED_DEVICE = "registered"
DELETED_DEVICE = "deleted"
@attr.s(slots=True, frozen=True)
class DeletedDeviceEntry:
"""Deleted Device Registry Entry."""
config_entries: Set[str] = attr.ib()
connections: Set[Tuple[str, str]] = attr.ib()
identifiers: Set[Tuple[str, str]] = attr.ib()
id: str = attr.ib()
def to_device_entry(self):
"""Create DeviceEntry from DeletedDeviceEntry."""
return DeviceEntry(
config_entries=self.config_entries,
connections=self.connections,
identifiers=self.identifiers,
id=self.id,
is_new=True,
)
@attr.s(slots=True, frozen=True)
class DeviceEntry:
"""Device Registry Entry."""
config_entries: Set[str] = attr.ib(converter=set, factory=set)
connections: Set[Tuple[str, str]] = attr.ib(converter=set, factory=set)
identifiers: Set[Tuple[str, str]] = attr.ib(converter=set, factory=set)
manufacturer: str = attr.ib(default=None)
model: str = attr.ib(default=None)
name: str = attr.ib(default=None)
sw_version: str = attr.ib(default=None)
via_device_id: str = attr.ib(default=None)
area_id: str = attr.ib(default=None)
name_by_user: str = attr.ib(default=None)
entry_type: str = attr.ib(default=None)
id: str = attr.ib(factory=uuid_util.uuid_v1mc_hex)
# This value is not stored, just used to keep track of events to fire.
is_new: bool = attr.ib(default=False)
def format_mac(mac: str) -> str:
"""Format the mac address string for entry into dev reg."""
to_test = mac
if len(to_test) == 17 and to_test.count(":") == 5:
return to_test.lower()
if len(to_test) == 17 and to_test.count("-") == 5:
to_test = to_test.replace("-", "")
elif len(to_test) == 14 and to_test.count(".") == 2:
to_test = to_test.replace(".", "")
if len(to_test) == 12:
# no : included
return ":".join(to_test.lower()[i : i + 2] for i in range(0, 12, 2))
# Not sure how formatted, return original
return mac
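# A few illustrative inputs (not from the original source); each recognised
# variant normalizes to the colon-separated lowercase form:
#   format_mac("AA:BB:CC:DD:EE:FF") -> "aa:bb:cc:dd:ee:ff"
#   format_mac("AA-BB-CC-DD-EE-FF") -> "aa:bb:cc:dd:ee:ff"
#   format_mac("aabb.ccdd.eeff")    -> "aa:bb:cc:dd:ee:ff"
#   format_mac("not-a-mac")         -> "not-a-mac"  (returned unchanged)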
class DeviceRegistry:
"""Class to hold a registry of devices."""
devices: Dict[str, DeviceEntry]
deleted_devices: Dict[str, DeletedDeviceEntry]
_devices_index: Dict[str, Dict[str, Dict[str, str]]]
def __init__(self, hass: HomeAssistantType) -> None:
"""Initialize the device registry."""
self.hass = hass
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._clear_index()
@callback
def async_get(self, device_id: str) -> Optional[DeviceEntry]:
"""Get device."""
return self.devices.get(device_id)
@callback
def async_get_device(
self, identifiers: set, connections: set
) -> Optional[DeviceEntry]:
"""Check if device is registered."""
device_id = self._async_get_device_id_from_index(
REGISTERED_DEVICE, identifiers, connections
)
if device_id is None:
return None
return self.devices[device_id]
def _async_get_deleted_device(
self, identifiers: set, connections: set
) -> Optional[DeletedDeviceEntry]:
"""Check if device is deleted."""
device_id = self._async_get_device_id_from_index(
DELETED_DEVICE, identifiers, connections
)
if device_id is None:
return None
return self.deleted_devices[device_id]
def _async_get_device_id_from_index(
self, index: str, identifiers: set, connections: set
) -> Optional[str]:
"""Check if device has previously been registered."""
devices_index = self._devices_index[index]
for identifier in identifiers:
if identifier in devices_index[IDX_IDENTIFIERS]:
return devices_index[IDX_IDENTIFIERS][identifier]
if not connections:
return None
for connection in _normalize_connections(connections):
if connection in devices_index[IDX_CONNECTIONS]:
return devices_index[IDX_CONNECTIONS][connection]
return None
def _add_device(self, device: Union[DeviceEntry, DeletedDeviceEntry]) -> None:
"""Add a device and index it."""
if isinstance(device, DeletedDeviceEntry):
devices_index = self._devices_index[DELETED_DEVICE]
self.deleted_devices[device.id] = device
else:
devices_index = self._devices_index[REGISTERED_DEVICE]
self.devices[device.id] = device
_add_device_to_index(devices_index, device)
def _remove_device(self, device: Union[DeviceEntry, DeletedDeviceEntry]) -> None:
"""Remove a device and remove it from the index."""
if isinstance(device, DeletedDeviceEntry):
devices_index = self._devices_index[DELETED_DEVICE]
self.deleted_devices.pop(device.id)
else:
devices_index = self._devices_index[REGISTERED_DEVICE]
self.devices.pop(device.id)
_remove_device_from_index(devices_index, device)
def _update_device(self, old_device: DeviceEntry, new_device: DeviceEntry) -> None:
"""Update a device and the index."""
self.devices[new_device.id] = new_device
devices_index = self._devices_index[REGISTERED_DEVICE]
_remove_device_from_index(devices_index, old_device)
_add_device_to_index(devices_index, new_device)
def _clear_index(self):
"""Clear the index."""
self._devices_index = {
REGISTERED_DEVICE: {IDX_IDENTIFIERS: {}, IDX_CONNECTIONS: {}},
DELETED_DEVICE: {IDX_IDENTIFIERS: {}, IDX_CONNECTIONS: {}},
}
def _rebuild_index(self):
"""Create the index after loading devices."""
self._clear_index()
for device in self.devices.values():
_add_device_to_index(self._devices_index[REGISTERED_DEVICE], device)
for device in self.deleted_devices.values():
_add_device_to_index(self._devices_index[DELETED_DEVICE], device)
@callback
def async_get_or_create(
self,
*,
config_entry_id,
connections=None,
identifiers=None,
manufacturer=_UNDEF,
model=_UNDEF,
name=_UNDEF,
default_manufacturer=_UNDEF,
default_model=_UNDEF,
default_name=_UNDEF,
sw_version=_UNDEF,
entry_type=_UNDEF,
via_device=None,
):
"""Get device. Create if it doesn't exist."""
if not identifiers and not connections:
return None
if identifiers is None:
identifiers = set()
if connections is None:
connections = set()
else:
connections = _normalize_connections(connections)
device = self.async_get_device(identifiers, connections)
if device is None:
deleted_device = self._async_get_deleted_device(identifiers, connections)
if deleted_device is None:
device = DeviceEntry(is_new=True)
else:
self._remove_device(deleted_device)
device = deleted_device.to_device_entry()
self._add_device(device)
else:
if default_manufacturer and not device.manufacturer:
manufacturer = default_manufacturer
if default_model and not device.model:
model = default_model
if default_name and not device.name:
name = default_name
if via_device is not None:
via = self.async_get_device({via_device}, set())
via_device_id = via.id if via else _UNDEF
else:
via_device_id = _UNDEF
return self._async_update_device(
device.id,
add_config_entry_id=config_entry_id,
via_device_id=via_device_id,
merge_connections=connections or _UNDEF,
merge_identifiers=identifiers or _UNDEF,
manufacturer=manufacturer,
model=model,
name=name,
sw_version=sw_version,
entry_type=entry_type,
)
@callback
def async_update_device(
self,
device_id,
*,
area_id=_UNDEF,
manufacturer=_UNDEF,
model=_UNDEF,
name=_UNDEF,
name_by_user=_UNDEF,
new_identifiers=_UNDEF,
sw_version=_UNDEF,
via_device_id=_UNDEF,
remove_config_entry_id=_UNDEF,
):
"""Update properties of a device."""
return self._async_update_device(
device_id,
area_id=area_id,
manufacturer=manufacturer,
model=model,
name=name,
name_by_user=name_by_user,
new_identifiers=new_identifiers,
sw_version=sw_version,
via_device_id=via_device_id,
remove_config_entry_id=remove_config_entry_id,
)
@callback
def _async_update_device(
self,
device_id,
*,
add_config_entry_id=_UNDEF,
remove_config_entry_id=_UNDEF,
merge_connections=_UNDEF,
merge_identifiers=_UNDEF,
new_identifiers=_UNDEF,
manufacturer=_UNDEF,
model=_UNDEF,
name=_UNDEF,
sw_version=_UNDEF,
entry_type=_UNDEF,
via_device_id=_UNDEF,
area_id=_UNDEF,
name_by_user=_UNDEF,
):
"""Update device attributes."""
old = self.devices[device_id]
changes = {}
config_entries = old.config_entries
if (
add_config_entry_id is not _UNDEF
and add_config_entry_id not in old.config_entries
):
config_entries = old.config_entries | {add_config_entry_id}
if (
remove_config_entry_id is not _UNDEF
and remove_config_entry_id in config_entries
):
if config_entries == {remove_config_entry_id}:
self.async_remove_device(device_id)
return
config_entries = config_entries - {remove_config_entry_id}
if config_entries is not old.config_entries:
changes["config_entries"] = config_entries
for attr_name, value in (
("connections", merge_connections),
("identifiers", merge_identifiers),
):
old_value = getattr(old, attr_name)
# If not undefined, check if `value` contains new items.
if value is not _UNDEF and not value.issubset(old_value):
changes[attr_name] = old_value | value
if new_identifiers is not _UNDEF:
changes["identifiers"] = new_identifiers
for attr_name, value in (
("manufacturer", manufacturer),
("model", model),
("name", name),
("sw_version", sw_version),
("entry_type", entry_type),
("via_device_id", via_device_id),
):
if value is not _UNDEF and value != getattr(old, attr_name):
changes[attr_name] = value
if area_id is not _UNDEF and area_id != old.area_id:
changes["area_id"] = area_id
if name_by_user is not _UNDEF and name_by_user != old.name_by_user:
changes["name_by_user"] = name_by_user
if old.is_new:
changes["is_new"] = False
if not changes:
return old
new = attr.evolve(old, **changes)
self._update_device(old, new)
self.async_schedule_save()
self.hass.bus.async_fire(
EVENT_DEVICE_REGISTRY_UPDATED,
{
"action": "create" if "is_new" in changes else "update",
"device_id": new.id,
},
)
return new
@callback
def async_remove_device(self, device_id: str) -> None:
"""Remove a device from the device registry."""
device = self.devices[device_id]
self._remove_device(device)
self._add_device(
DeletedDeviceEntry(
config_entries=device.config_entries,
connections=device.connections,
identifiers=device.identifiers,
id=device.id,
)
)
self.hass.bus.async_fire(
EVENT_DEVICE_REGISTRY_UPDATED, {"action": "remove", "device_id": device_id}
)
self.async_schedule_save()
async def async_load(self):
"""Load the device registry."""
async_setup_cleanup(self.hass, self)
data = await self._store.async_load()
devices = OrderedDict()
deleted_devices = OrderedDict()
if data is not None:
for device in data["devices"]:
devices[device["id"]] = DeviceEntry(
config_entries=set(device["config_entries"]),
connections={tuple(conn) for conn in device["connections"]},
identifiers={tuple(iden) for iden in device["identifiers"]},
manufacturer=device["manufacturer"],
model=device["model"],
name=device["name"],
sw_version=device["sw_version"],
# Introduced in 0.110
entry_type=device.get("entry_type"),
id=device["id"],
# Introduced in 0.79
# renamed in 0.95
via_device_id=(
device.get("via_device_id") or device.get("hub_device_id")
),
# Introduced in 0.87
area_id=device.get("area_id"),
name_by_user=device.get("name_by_user"),
)
# Introduced in 0.111
for device in data.get("deleted_devices", []):
deleted_devices[device["id"]] = DeletedDeviceEntry(
config_entries=set(device["config_entries"]),
connections={tuple(conn) for conn in device["connections"]},
identifiers={tuple(iden) for iden in device["identifiers"]},
id=device["id"],
)
self.devices = devices
self.deleted_devices = deleted_devices
self._rebuild_index()
@callback
def async_schedule_save(self) -> None:
"""Schedule saving the device registry."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> Dict[str, List[Dict[str, Any]]]:
"""Return data of device registry to store in a file."""
data = {}
data["devices"] = [
{
"config_entries": list(entry.config_entries),
"connections": list(entry.connections),
"identifiers": list(entry.identifiers),
"manufacturer": entry.manufacturer,
"model": entry.model,
"name": entry.name,
"sw_version": entry.sw_version,
"entry_type": entry.entry_type,
"id": entry.id,
"via_device_id": entry.via_device_id,
"area_id": entry.area_id,
"name_by_user": entry.name_by_user,
}
for entry in self.devices.values()
]
data["deleted_devices"] = [
{
"config_entries": list(entry.config_entries),
"connections": list(entry.connections),
"identifiers": list(entry.identifiers),
"id": entry.id,
}
for entry in self.deleted_devices.values()
]
return data
@callback
def async_clear_config_entry(self, config_entry_id: str) -> None:
"""Clear config entry from registry entries."""
for device in list(self.devices.values()):
self._async_update_device(device.id, remove_config_entry_id=config_entry_id)
for deleted_device in list(self.deleted_devices.values()):
config_entries = deleted_device.config_entries
if config_entry_id not in config_entries:
continue
if config_entries == {config_entry_id}:
# Permanently remove the device from the device registry.
self._remove_device(deleted_device)
else:
config_entries = config_entries - {config_entry_id}
# No need to reindex here since we currently
# do not have a lookup by config entry
self.deleted_devices[deleted_device.id] = attr.evolve(
deleted_device, config_entries=config_entries
)
self.async_schedule_save()
@callback
def async_clear_area_id(self, area_id: str) -> None:
"""Clear area id from registry entries."""
for dev_id, device in self.devices.items():
if area_id == device.area_id:
self._async_update_device(dev_id, area_id=None)
@singleton(DATA_REGISTRY)
async def async_get_registry(hass: HomeAssistantType) -> DeviceRegistry:
"""Create entity registry."""
reg = DeviceRegistry(hass)
await reg.async_load()
return reg
@callback
def async_entries_for_area(registry: DeviceRegistry, area_id: str) -> List[DeviceEntry]:
"""Return entries that match an area."""
return [device for device in registry.devices.values() if device.area_id == area_id]
@callback
def async_entries_for_config_entry(
registry: DeviceRegistry, config_entry_id: str
) -> List[DeviceEntry]:
"""Return entries that match a config entry."""
return [
device
for device in registry.devices.values()
if config_entry_id in device.config_entries
]
@callback
def async_cleanup(
hass: HomeAssistantType,
dev_reg: DeviceRegistry,
ent_reg: "entity_registry.EntityRegistry",
) -> None:
"""Clean up device registry."""
# Find all devices that are referenced by a config_entry.
config_entry_ids = {entry.entry_id for entry in hass.config_entries.async_entries()}
references_config_entries = {
device.id
for device in dev_reg.devices.values()
for config_entry_id in device.config_entries
if config_entry_id in config_entry_ids
}
# Find all devices that are referenced in the entity registry.
references_entities = {entry.device_id for entry in ent_reg.entities.values()}
orphan = set(dev_reg.devices) - references_entities - references_config_entries
for dev_id in orphan:
dev_reg.async_remove_device(dev_id)
# Find all referenced config entries that no longer exist
# This shouldn't happen, but we have not been able to track down the bug :(
for device in list(dev_reg.devices.values()):
for config_entry_id in device.config_entries:
if config_entry_id not in config_entry_ids:
dev_reg.async_update_device(
device.id, remove_config_entry_id=config_entry_id
)
@callback
def async_setup_cleanup(hass: HomeAssistantType, dev_reg: DeviceRegistry) -> None:
"""Clean up device registry when entities removed."""
from . import entity_registry # pylint: disable=import-outside-toplevel
async def cleanup():
"""Cleanup."""
ent_reg = await entity_registry.async_get_registry(hass)
async_cleanup(hass, dev_reg, ent_reg)
debounced_cleanup = Debouncer(
hass, _LOGGER, cooldown=CLEANUP_DELAY, immediate=False, function=cleanup
)
async def entity_registry_changed(event: Event) -> None:
"""Handle entity updated or removed."""
if (
event.data["action"] == "update"
and "device_id" not in event.data["changes"]
) or event.data["action"] == "create":
return
await debounced_cleanup.async_call()
if hass.is_running:
hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, entity_registry_changed
)
return
async def startup_clean(event: Event) -> None:
"""Clean up on startup."""
hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, entity_registry_changed
)
await debounced_cleanup.async_call()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, startup_clean)
def _normalize_connections(connections: set) -> set:
"""Normalize connections to ensure we can match mac addresses."""
return {
(key, format_mac(value)) if key == CONNECTION_NETWORK_MAC else (key, value)
for key, value in connections
}
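# Illustrative example (not from the original source; the UPnP value is made
# up): only network MAC connections are rewritten, other types pass through:
#   _normalize_connections({("mac", "AA-BB-CC-DD-EE-FF"), ("upnp", "uuid:1234")})
#   == {("mac", "aa:bb:cc:dd:ee:ff"), ("upnp", "uuid:1234")}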
def _add_device_to_index(
devices_index: dict, device: Union[DeviceEntry, DeletedDeviceEntry]
) -> None:
"""Add a device to the index."""
for identifier in device.identifiers:
devices_index[IDX_IDENTIFIERS][identifier] = device.id
for connection in device.connections:
devices_index[IDX_CONNECTIONS][connection] = device.id
def _remove_device_from_index(
devices_index: dict, device: Union[DeviceEntry, DeletedDeviceEntry]
) -> None:
"""Remove a device from the index."""
for identifier in device.identifiers:
if identifier in devices_index[IDX_IDENTIFIERS]:
del devices_index[IDX_IDENTIFIERS][identifier]
for connection in device.connections:
if connection in devices_index[IDX_CONNECTIONS]:
del devices_index[IDX_CONNECTIONS][connection]
|
|
from tests.test_cases import TestCase
class GetCollectionTestCase(TestCase):
def test_app_should_respond_with_articles(self):
response = self.client.get('/api/articles')
self.assertEqual(response.status_code, 200)
meta = response.document.get('meta', None)
self.assertIsNone(meta)
links = response.document.get('links')
self.assertIsNotNone(links)
self.assertEqual(links.get('self'), 'http://testserver/api/articles')
articles = response.document.get('data')
self.assertIsNotNone(articles)
self.assertIsInstance(articles, list)
self.assertTrue(len(articles) > 0)
included = response.document.get('included', None)
self.assertIsNone(included)
class GetEmptyCollectionTestCase(TestCase):
fixtures = None
def test_app_should_return_empty_array(self):
response = self.client.get('/api/articles')
self.assertEqual(response.status_code, 200)
meta = response.document.get('meta', None)
self.assertIsNone(meta)
links = response.document.get('links')
self.assertIsNotNone(links)
self.assertEqual(links.get('self'), 'http://testserver/api/articles')
articles = response.document.get('data')
self.assertIsNotNone(articles)
self.assertIsInstance(articles, list)
self.assertEqual(len(articles), 0)
included = response.document.get('included', None)
self.assertIsNone(included)
class GetFilteredCollectionTestCase(TestCase):
def test_app_should_respond_with_filtered_articles(self):
response = self.client.get('/api/articles?filter[title:startswith]=Se')
self.assertEqual(response.status_code, 200)
articles = response.document.get('data')
self.assertIsNotNone(articles)
self.assertIsInstance(articles, list)
self.assertEqual(len(articles), 1)
self.assertEqual(articles[0].get('title'), 'Second article')
class GetSortedCollectionTestCase(TestCase):
def test_app_should_respond_with_sorted_articles_asc(self):
response = self.client.get('/api/articles?sort=%2Btitle')
self.assertEqual(response.status_code, 200)
articles = response.document.get('data')
self.assertIsNotNone(articles)
self.assertIsInstance(articles, list)
self.assertTrue(len(articles) > 0)
first = articles[0]
last = articles[len(articles) - 1]
self.assertEqual(first.get('title'), 'Fifth article')
self.assertEqual(last.get('title'), 'Third article')
def test_app_should_respond_with_sorted_articles_desc(self):
response = self.client.get('/api/articles?sort=-title')
self.assertEqual(response.status_code, 200)
articles = response.document.get('data')
self.assertIsNotNone(articles)
self.assertIsInstance(articles, list)
self.assertTrue(len(articles) > 0)
first = articles[0]
last = articles[len(articles) - 1]
self.assertEqual(first.get('title'), 'Third article')
self.assertEqual(last.get('title'), 'Fifth article')
class GetPaginatedCollectionTestCase(TestCase):
def test_app_should_respond_with_paginated_articles(self):
response = self.client.get('/api/articles?page%5Boffset%5D=0&page%5Blimit%5D=2')
self.assertEqual(response.status_code, 200)
articles = response.document.get('data')
self.assertEqual(len(articles), 2)
self.assertEqual(articles[0].get('id'), '1')
self.assertEqual(articles[1].get('id'), '2')
links = response.document.get('links')
self.assertEqual(links.get('first'), 'http://testserver/api/articles?page[offset]=0&page[limit]=2')
self.assertIsNone(links.get('prev'))
self.assertEqual(links.get('next'), 'http://testserver/api/articles?page[offset]=2&page[limit]=2')
self.assertEqual(links.get('last'), 'http://testserver/api/articles?page[offset]=4&page[limit]=2')
response = self.client.get('/api/articles?page%5Boffset%5D=2&page%5Blimit%5D=2')
self.assertEqual(response.status_code, 200)
articles = response.document.get('data')
self.assertEqual(len(articles), 2)
self.assertEqual(articles[0].get('id'), '3')
self.assertEqual(articles[1].get('id'), '4')
links = response.document.get('links')
self.assertEqual(links.get('first'), 'http://testserver/api/articles?page[offset]=0&page[limit]=2')
self.assertEqual(links.get('prev'), 'http://testserver/api/articles?page[offset]=0&page[limit]=2')
self.assertEqual(links.get('next'), 'http://testserver/api/articles?page[offset]=4&page[limit]=2')
self.assertEqual(links.get('last'), 'http://testserver/api/articles?page[offset]=4&page[limit]=2')
response = self.client.get('/api/articles?page%5Boffset%5D=4&page%5Blimit%5D=2')
self.assertEqual(response.status_code, 200)
articles = response.document.get('data')
self.assertEqual(len(articles), 1)
self.assertEqual(articles[0].get('id'), '5')
links = response.document.get('links')
self.assertEqual(links.get('first'), 'http://testserver/api/articles?page[offset]=0&page[limit]=2')
self.assertEqual(links.get('prev'), 'http://testserver/api/articles?page[offset]=2&page[limit]=2')
self.assertIsNone(links.get('next'))
self.assertEqual(links.get('last'), 'http://testserver/api/articles?page[offset]=4&page[limit]=2')
class GetIncludedCollectionTestCase(TestCase):
def test_app_should_respond_with_articles_and_included(self):
response = self.client.get('/api/articles?include=category')
self.assertEqual(response.status_code, 200)
meta = response.document.get('meta', None)
self.assertIsNone(meta)
links = response.document.get('links')
self.assertIsNotNone(links)
self.assertEqual(links.get('self'), 'http://testserver/api/articles')
articles = response.document.get('data')
self.assertIsNotNone(articles)
self.assertIsInstance(articles, list)
self.assertTrue(len(articles) > 0)
included = response.document.get('included', None)
self.assertIsNotNone(included)
for item in included:
self.assertEqual('categories', item['type'])
class GetSparseCollectionTestCase(TestCase):
def test_app_should_respond_with_sparse_articles(self):
response = self.client.get('/api/articles?fields[articles]=title,category')
articles = response.document.get('data')
for article in articles:
self.assertSparseObject(article, (('title',), ('perex',)), (('category',), ('tags',)))
class PostCollectionTestCase(TestCase):
def test_app_should_create_article(self):
data = {
'data': {
'type': 'articles',
'title': 'Lorem ipsum',
'perex': 'Lorem ipsum...',
'links': {
'category': {
'linkage': {
'type': 'categories',
'id': '1'
}
}
}
}
}
response = self.client.post('/api/articles', data)
self.assertEqual(response.status_code, 201)
self.assertEqual(response['Location'], 'http://testserver/api/articles/6')
response = self.client.get('/api/articles/6')
self.assertEqual(response.status_code, 200)
article = response.document.get('data')
self.assertIsNotNone(article)
|
|
'''Test the queue functionality'''
from common import TestQless
class TestJobs(TestQless):
'''We should be able to list jobs in various states for a given queue'''
def test_malformed(self):
'''Enumerate all the ways that the input can be malformed'''
self.assertMalformed(self.lua, [
('jobs', 0, 'complete', 'foo'),
('jobs', 0, 'complete', 0, 'foo'),
('jobs', 0, 'running'),
('jobs', 0, 'running', 'queue', 'foo'),
('jobs', 0, 'running', 'queue', 0, 'foo'),
('jobs', 0, 'stalled'),
('jobs', 0, 'stalled', 'queue', 'foo'),
('jobs', 0, 'stalled', 'queue', 0, 'foo'),
('jobs', 0, 'scheduled'),
('jobs', 0, 'scheduled', 'queue', 'foo'),
('jobs', 0, 'scheduled', 'queue', 0, 'foo'),
('jobs', 0, 'depends'),
('jobs', 0, 'depends', 'queue', 'foo'),
('jobs', 0, 'depends', 'queue', 0, 'foo'),
('jobs', 0, 'recurring'),
('jobs', 0, 'recurring', 'queue', 'foo'),
('jobs', 0, 'recurring', 'queue', 0, 'foo'),
('jobs', 0, 'foo', 'queue', 0, 25)
])
def test_complete(self):
'''Verify we can list complete jobs'''
jids = map(str, range(10))
for jid in jids:
self.lua('put', jid, 'worker', 'queue', jid, 'klass', {}, 0)
self.lua('pop', jid, 'queue', 'worker', 10)
self.lua('complete', jid, jid, 'worker', 'queue', {})
complete = self.lua('jobs', jid, 'complete')
self.assertEqual(len(complete), int(jid) + 1)
self.assertEqual(complete[0], jid)
def test_running(self):
'''Verify that we can get a list of running jobs in a queue'''
jids = map(str, range(10))
for jid in jids:
self.lua('put', jid, 'worker', 'queue', jid, 'klass', {}, 0)
self.lua('pop', jid, 'queue', 'worker', 10)
running = self.lua('jobs', jid, 'running', 'queue')
self.assertEqual(len(running), int(jid) + 1)
self.assertEqual(running[-1], jid)
def test_stalled(self):
'''Verify that we can get a list of stalled jobs in a queue'''
self.lua('config.set', 0, 'heartbeat', 10)
jids = map(str, range(10))
for jid in jids:
self.lua('put', jid, 'worker', 'queue', jid, 'klass', {}, 0)
self.lua('pop', jid, 'queue', 'worker', 10)
stalled = self.lua('jobs', int(jid) + 20, 'stalled', 'queue')
self.assertEqual(len(stalled), int(jid) + 1)
self.assertEqual(stalled[-1], jid)
def test_scheduled(self):
'''Verify that we can get a list of scheduled jobs in a queue'''
jids = map(str, range(1, 11))
for jid in jids:
self.lua('put', jid, 'worker', 'queue', jid, 'klass', {}, jid)
scheduled = self.lua('jobs', 0, 'scheduled', 'queue')
self.assertEqual(len(scheduled), int(jid))
self.assertEqual(scheduled[-1], jid)
def test_depends(self):
'''Verify that we can get a list of dependent jobs in a queue'''
self.lua('put', 0, 'worker', 'queue', 'a', 'klass', {}, 0)
jids = map(str, range(0, 10))
for jid in jids:
self.lua(
'put', jid, 'worker', 'queue', jid, 'klass', {}, 0, 'depends', ['a'])
depends = self.lua('jobs', 0, 'depends', 'queue')
self.assertEqual(len(depends), int(jid) + 1)
self.assertEqual(depends[-1], jid)
def test_recurring(self):
'''Verify that we can get a list of recurring jobs in a queue'''
jids = map(str, range(0, 10))
for jid in jids:
self.lua(
'recur', jid, 'queue', jid, 'klass', {}, 'interval', 60, 0)
recurring = self.lua('jobs', 0, 'recurring', 'queue')
self.assertEqual(len(recurring), int(jid) + 1)
self.assertEqual(recurring[-1], jid)
def test_recurring_offset(self):
'''Recurring jobs with a future offset should be included'''
jids = map(str, range(0, 10))
for jid in jids:
self.lua(
'recur', jid, 'queue', jid, 'klass', {}, 'interval', 60, 10)
recurring = self.lua('jobs', 0, 'recurring', 'queue')
self.assertEqual(len(recurring), int(jid) + 1)
self.assertEqual(recurring[-1], jid)
def test_scheduled_waiting(self):
'''Jobs that were scheduled but are ready shouldn't be in scheduled'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 10)
self.assertEqual(len(self.lua('jobs', 20, 'scheduled', 'queue')), 0)
def test_pagination_complete(self):
'''Jobs should be able to provide paginated results for complete'''
jids = map(str, range(100))
for jid in jids:
self.lua('put', jid, 'worker', 'queue', jid, 'klass', {}, 0)
self.lua('pop', jid, 'queue', 'worker', 10)
self.lua('complete', jid, jid, 'worker', 'queue', {})
# Get two pages and ensure they're what we expect
jids = list(reversed(jids))
self.assertEqual(
self.lua('jobs', 0, 'complete', 0, 50), jids[:50])
self.assertEqual(
self.lua('jobs', 0, 'complete', 50, 50), jids[50:])
def test_pagination_running(self):
        '''Jobs should be able to provide paginated results for running'''
jids = map(str, range(100))
self.lua('config.set', 0, 'heartbeat', 1000)
for jid in jids:
self.lua('put', jid, 'worker', 'queue', jid, 'klass', {}, 0)
self.lua('pop', jid, 'queue', 'worker', 10)
# Get two pages and ensure they're what we expect
self.assertEqual(
self.lua('jobs', 100, 'running', 'queue', 0, 50), jids[:50])
self.assertEqual(
self.lua('jobs', 100, 'running', 'queue', 50, 50), jids[50:])
class TestQueue(TestQless):
'''Test queue info tests'''
expected = {
'name': 'queue',
'paused': False,
'stalled': 0,
'waiting': 0,
'running': 0,
'depends': 0,
'scheduled': 0,
'recurring': 0,
'throttled': 0
}
def setUp(self):
TestQless.setUp(self)
# No grace period
self.lua('config.set', 0, 'grace-period', 0)
def test_stalled(self):
'''Discern stalled job counts correctly'''
expected = dict(self.expected)
expected['stalled'] = 1
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
job = self.lua('pop', 1, 'queue', 'worker', 10)[0]
expires = job['expires'] + 10
self.assertEqual(self.lua('queues', expires, 'queue'), expected)
self.assertEqual(self.lua('queues', expires), [expected])
def test_throttled(self):
'''Discern throttled job counts correctly'''
expected = dict(self.expected)
expected['throttled'] = 1
expected['running'] = 1
self.lua('throttle.set', 0, 'ql:q:queue', 1)
self.lua('put', 1, 'worker', 'queue', 'jid1', 'klass', {}, 0)
self.lua('put', 2, 'worker', 'queue', 'jid2', 'klass', {}, 0)
self.lua('pop', 3, 'queue', 'worker', 10)
self.assertEqual(self.lua('queues', 4, 'queue'), expected)
self.assertEqual(self.lua('queues', 5), [expected])
def test_waiting(self):
'''Discern waiting job counts correctly'''
expected = dict(self.expected)
expected['waiting'] = 1
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.assertEqual(self.lua('queues', 0, 'queue'), expected)
self.assertEqual(self.lua('queues', 0), [expected])
def test_running(self):
'''Discern running job counts correctly'''
expected = dict(self.expected)
expected['running'] = 1
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 1, 'queue', 'worker', 10)
self.assertEqual(self.lua('queues', 0, 'queue'), expected)
self.assertEqual(self.lua('queues', 0), [expected])
def test_depends(self):
'''Discern dependent job counts correctly'''
expected = dict(self.expected)
expected['depends'] = 1
expected['waiting'] = 1
self.lua('put', 0, 'worker', 'queue', 'a', 'klass', {}, 0)
self.lua('put', 0, 'worker', 'queue', 'b', 'klass', {}, 0, 'depends', ['a'])
self.assertEqual(self.lua('queues', 0, 'queue'), expected)
self.assertEqual(self.lua('queues', 0), [expected])
def test_scheduled(self):
'''Discern scheduled job counts correctly'''
expected = dict(self.expected)
expected['scheduled'] = 1
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 10)
self.assertEqual(self.lua('queues', 0, 'queue'), expected)
self.assertEqual(self.lua('queues', 0), [expected])
def test_recurring(self):
'''Discern recurring job counts correctly'''
expected = dict(self.expected)
expected['recurring'] = 1
self.lua('recur', 0, 'queue', 'jid', 'klass', {}, 'interval', 60, 0)
self.assertEqual(self.lua('queues', 0, 'queue'), expected)
self.assertEqual(self.lua('queues', 0), [expected])
def test_recurring_offset(self):
'''Discern future recurring job counts correctly'''
expected = dict(self.expected)
expected['recurring'] = 1
self.lua('recur', 0, 'queue', 'jid', 'klass', {}, 'interval', 60, 10)
self.assertEqual(self.lua('queues', 0, 'queue'), expected)
self.assertEqual(self.lua('queues', 0), [expected])
def test_pause(self):
'''Can pause and unpause a queue'''
jids = map(str, range(10))
for jid in jids:
self.lua('put', 0, 'worker', 'queue', jid, 'klass', {}, 0)
# After pausing, we can't get the jobs, and the state reflects it
self.lua('pause', 0, 'queue')
self.assertEqual(len(self.lua('pop', 0, 'queue', 'worker', 100)), 0)
expected = dict(self.expected)
expected['paused'] = True
expected['waiting'] = 10
self.assertEqual(self.lua('queues', 0, 'queue'), expected)
self.assertEqual(self.lua('queues', 0), [expected])
# Once unpaused, we should be able to pop jobs off
self.lua('unpause', 0, 'queue')
self.assertEqual(len(self.lua('pop', 0, 'queue', 'worker', 100)), 10)
def test_advance(self):
'''When advancing a job to a new queue, queues should know about it'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('complete', 0, 'jid', 'worker', 'queue', {}, 'next', 'another')
expected = dict(self.expected)
expected['name'] = 'another'
expected['waiting'] = 1
self.assertEqual(self.lua('queues', 0), [expected, self.expected])
def test_recurring_move(self):
'''When moving a recurring job, it should add the queue to queues'''
expected = dict(self.expected)
expected['name'] = 'another'
expected['recurring'] = 1
self.lua('recur', 0, 'queue', 'jid', 'klass', {}, 'interval', 60, 0)
self.lua('recur.update', 0, 'jid', 'queue', 'another')
self.assertEqual(self.lua('queues', 0), [expected, self.expected])
def test_scheduled_waiting(self):
'''When checking counts, jobs that /were/ scheduled can be waiting'''
expected = dict(self.expected)
expected['waiting'] = 1
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 10)
self.assertEqual(self.lua('queues', 20), [expected])
self.assertEqual(self.lua('queues', 20, 'queue'), expected)
class TestPut(TestQless):
'''Test putting jobs into a queue'''
# For reference:
#
    #     put(now, worker, queue, jid, klass, data, delay,
# [priority, p],
# [tags, t],
# [retries, r],
# [depends, '[...]'])
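    # A minimal illustrative call, mirroring how the tests below drive the
    # harness (the concrete values are placeholders):
    #
    #   self.lua('put', 12345, 'worker', 'queue', 'jid', 'klass', {}, 0,
    #            'priority', 1, 'tags', ['foo'], 'retries', 3)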
def put(self, *args):
'''Alias for self.lua('put', ...)'''
return self.lua('put', *args)
def test_malformed(self):
'''Enumerate all the ways in which the input can be messed up'''
self.assertMalformed(self.put, [
(12345,), # No queue provided
(12345, 'foo'), # No jid provided
(12345, 'foo', 'bar'), # No klass provided
(12345, 'foo', 'bar', 'whiz'), # No data provided
(12345, 'foo', 'bar', 'whiz',
'{}'), # No delay provided
(12345, 'foo', 'bar', 'whiz',
'{]'), # Malformed data provided
(12345, 'foo', 'bar', 'whiz',
'{}', 'number'), # Malformed delay provided
(12345, 'foo', 'bar', 'whiz', '{}', 1,
'retries'), # Retries arg missing
(12345, 'foo', 'bar', 'whiz', '{}', 1,
'retries', 'foo'), # Retries arg not a number
(12345, 'foo', 'bar', 'whiz', '{}', 1,
'tags'), # Tags arg missing
(12345, 'foo', 'bar', 'whiz', '{}', 1,
'tags', '{]'), # Tags arg malformed
(12345, 'foo', 'bar', 'whiz', '{}', 1,
'priority'), # Priority arg missing
(12345, 'foo', 'bar', 'whiz', '{}', 1,
'priority', 'foo'), # Priority arg malformed
(12345, 'foo', 'bar', 'whiz', '{}', 1,
'depends'), # Depends arg missing
(12345, 'foo', 'bar', 'whiz', '{}', 1,
'depends', '{]') # Depends arg malformed
])
def test_basic(self):
'''We should be able to put and get jobs'''
jid = self.lua('put', 12345, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.assertEqual(jid, 'jid')
# Now we should be able to verify the data we get back
self.assertEqual(self.lua('get', 12345, 'jid'), {
'data': '{}',
'dependencies': {},
'dependents': {},
'expires': 0,
'failure': {},
'history': [{'q': 'queue', 'what': 'put', 'when': 12345}],
'jid': 'jid',
'klass': 'klass',
'priority': 0,
'queue': 'queue',
'remaining': 5,
'retries': 5,
'state': 'waiting',
'tags': {},
'tracked': False,
'throttles': ['ql:q:queue'],
'worker': u'',
'spawned_from_jid': False
})
def test_data_as_array(self):
'''We should be able to provide an array as data'''
# In particular, an empty array should be acceptable, and /not/
# transformed into a dictionary when it returns
self.lua('put', 12345, 'worker', 'queue', 'jid', 'klass', [], 0)
self.assertEqual(self.lua('get', 12345, 'jid')['data'], '[]')
def test_put_delay(self):
'''When we put a job with a delay, it's reflected in its data'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 1)
self.assertEqual(self.lua('get', 0, 'jid')['state'], 'scheduled')
        # Before the delay has elapsed we can't pop; afterwards we can
self.assertEqual(self.lua('pop', 0, 'queue', 'worker', 10), {})
self.assertEqual(len(self.lua('pop', 2, 'queue', 'worker', 10)), 1)
def test_put_retries(self):
'''Reflects changes to 'retries' '''
self.lua('put', 12345, 'worker', 'queue', 'jid', 'klass', {}, 0, 'retries', 2)
self.assertEqual(self.lua('get', 12345, 'jid')['retries'], 2)
self.assertEqual(self.lua('get', 12345, 'jid')['remaining'], 2)
def test_put_tags(self):
'''When we put a job with tags, it's reflected in its data'''
self.lua('put', 12345, 'worker', 'queue', 'jid', 'klass', {}, 0, 'tags', ['foo'])
self.assertEqual(self.lua('get', 12345, 'jid')['tags'], ['foo'])
def test_put_priority(self):
'''When we put a job with priority, it's reflected in its data'''
self.lua('put', 12345, 'worker', 'queue', 'jid', 'klass', {}, 0, 'priority', 1)
self.assertEqual(self.lua('get', 12345, 'jid')['priority'], 1)
def test_put_depends(self):
'''Dependencies are reflected in job data'''
self.lua('put', 12345, 'worker', 'queue', 'a', 'klass', {}, 0)
self.lua('put', 12345, 'worker', 'queue', 'b', 'klass', {}, 0, 'depends', ['a'])
self.assertEqual(self.lua('get', 12345, 'a')['dependents'], ['b'])
self.assertEqual(self.lua('get', 12345, 'b')['dependencies'], ['a'])
self.assertEqual(self.lua('get', 12345, 'b')['state'], 'depends')
def test_put_depends_with_delay(self):
        '''When we put a job with dependencies and a delay, both are reflected in the job data'''
self.lua('put', 12345, 'worker', 'queue', 'a', 'klass', {}, 0)
self.lua('put', 12345, 'worker', 'queue', 'b', 'klass', {}, 1, 'depends', ['a'])
self.assertEqual(self.lua('get', 12345, 'a')['dependents'], ['b'])
self.assertEqual(self.lua('get', 12345, 'b')['dependencies'], ['a'])
self.assertEqual(self.lua('get', 12345, 'b')['state'], 'depends')
def test_move(self):
'''Move is described in terms of puts.'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {'foo': 'bar'}, 0)
self.assertEqual(self.lua('get', 0, 'jid')['throttles'], ['ql:q:queue'])
self.lua('put', 0, 'worker', 'other', 'jid', 'klass', {'foo': 'bar'}, 0, 'throttles', ['ql:q:queue'])
self.assertEqual(self.lua('get', 1, 'jid'), {
'data': '{"foo": "bar"}',
'dependencies': {},
'dependents': {},
'expires': 0,
'failure': {},
'history': [
{'q': 'queue', 'what': 'put', 'when': 0},
{'q': 'other', 'what': 'put', 'when': 0}],
'jid': 'jid',
'klass': 'klass',
'priority': 0,
'queue': 'other',
'remaining': 5,
'retries': 5,
'state': 'waiting',
'tags': {},
'tracked': False,
'throttles': ['ql:q:other'],
'worker': u'',
'spawned_from_jid': False})
def test_move_update(self):
'''When moving, ensure data's only changed when overridden'''
for key, value, update in [
('priority', 1, 2),
('tags', ['foo'], ['bar']),
('retries', 2, 3)]:
            # First, when not overriding the value, it should stay the same
# even after moving
self.lua('put', 0, 'worker', 'queue', key, 'klass', {}, 0, key, value)
self.lua('put', 0, 'worker', 'other', key, 'klass', {}, 0)
self.assertEqual(self.lua('get', 0, key)[key], value)
# But if we override it, it should be updated
self.lua('put', 0, 'worker', 'queue', key, 'klass', {}, 0, key, update)
self.assertEqual(self.lua('get', 0, key)[key], update)
        # Updating dependencies has to be special-cased a little bit. Without
# overriding dependencies, they should be carried through the move
self.lua('put', 0, 'worker', 'queue', 'a', 'klass', {}, 0)
self.lua('put', 0, 'worker', 'queue', 'b', 'klass', {}, 0)
self.lua('put', 0, 'worker', 'queue', 'c', 'klass', {}, 0, 'depends', ['a'])
self.lua('put', 0, 'worker', 'other', 'c', 'klass', {}, 0)
self.assertEqual(self.lua('get', 0, 'a')['dependents'], ['c'])
self.assertEqual(self.lua('get', 0, 'b')['dependents'], {})
self.assertEqual(self.lua('get', 0, 'c')['dependencies'], ['a'])
# But if we move and update depends, then it should correctly reflect
self.lua('put', 0, 'worker', 'queue', 'c', 'klass', {}, 0, 'depends', ['b'])
self.assertEqual(self.lua('get', 0, 'a')['dependents'], {})
self.assertEqual(self.lua('get', 0, 'b')['dependents'], ['c'])
self.assertEqual(self.lua('get', 0, 'c')['dependencies'], ['b'])
class TestPeek(TestQless):
'''Test peeking jobs'''
# For reference:
#
# QlessAPI.peek = function(now, queue, count)
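    # A minimal illustrative call, as used throughout the tests below:
    #
    #   self.lua('peek', 0, 'queue', 10)  # peek at up to 10 waiting jobs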
def test_malformed(self):
'''Enumerate all the ways in which the input can be malformed'''
self.assertMalformed(self.lua, [
('peek', 12345,), # No queue provided
('peek', 12345, 'foo'), # No count provided
('peek', 12345, 'foo', 'number'), # Count arg malformed
])
def test_basic(self):
'''Can peek at a single waiting job'''
# No jobs for an empty queue
self.assertEqual(self.lua('peek', 0, 'foo', 10), {})
self.lua('put', 0, 'worker', 'foo', 'jid', 'klass', {}, 0)
# And now we should see a single job
self.assertEqual(self.lua('peek', 1, 'foo', 10), [{
'data': '{}',
'dependencies': {},
'dependents': {},
'expires': 0,
'failure': {},
'history': [{'q': 'foo', 'what': 'put', 'when': 0}],
'jid': 'jid',
'klass': 'klass',
'priority': 0,
'queue': 'foo',
'remaining': 5,
'retries': 5,
'state': 'waiting',
'tags': {},
'tracked': False,
'throttles': ['ql:q:foo'],
'worker': u'',
'spawned_from_jid': False
}])
# With several jobs in the queue, we should be able to see more
self.lua('put', 2, 'worker', 'foo', 'jid2', 'klass', {}, 0)
self.assertEqual([o['jid'] for o in self.lua('peek', 3, 'foo', 10)], [
'jid', 'jid2'])
def test_priority(self):
'''Peeking honors job priorities'''
        # We'll insert some jobs with different priorities
for jid in xrange(-10, 10):
self.lua(
'put', 0, 'worker', 'queue', jid, 'klass', {}, 0, 'priority', jid)
# Peek at the jobs, and they should be in the right order
jids = [job['jid'] for job in self.lua('peek', 1, 'queue', 100)]
self.assertEqual(jids, map(str, range(9, -11, -1)))
def test_time_order(self):
'''Honor the time that jobs were put, priority constant'''
# Put 100 jobs on with different times
for time in xrange(100):
self.lua('put', time, 'worker', 'queue', time, 'klass', {}, 0)
jids = [job['jid'] for job in self.lua('peek', 200, 'queue', 100)]
self.assertEqual(jids, map(str, range(100)))
def test_move(self):
'''When we move a job, it should be visible in the new, not old'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('put', 0, 'worker', 'other', 'jid', 'klass', {}, 0)
self.assertEqual(self.lua('peek', 1, 'queue', 10), {})
self.assertEqual(self.lua('peek', 1, 'other', 10)[0]['jid'], 'jid')
def test_recurring(self):
'''We can peek at recurring jobs'''
self.lua('recur', 0, 'queue', 'jid', 'klass', {}, 'interval', 10, 0)
self.assertEqual(len(self.lua('peek', 99, 'queue', 100)), 10)
def test_priority_update(self):
'''We can change a job's priority'''
self.lua('put', 0, 'worker', 'queue', 'a', 'klass', {}, 0, 'priority', 0)
self.lua('put', 0, 'worker', 'queue', 'b', 'klass', {}, 0, 'priority', 1)
self.assertEqual(['b', 'a'],
[j['jid'] for j in self.lua('peek', 0, 'queue', 100)])
self.lua('priority', 0, 'a', 2)
self.assertEqual(['a', 'b'],
[j['jid'] for j in self.lua('peek', 0, 'queue', 100)])
class TestPop(TestQless):
'''Test popping jobs'''
# For reference:
#
# QlessAPI.pop = function(now, queue, worker, count)
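    # A minimal illustrative call, as used throughout the tests below:
    #
    #   self.lua('pop', 0, 'queue', 'worker', 10)  # pop up to 10 jobs for 'worker'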
def test_malformed(self):
'''Enumerate all the ways this can be malformed'''
self.assertMalformed(self.lua, [
('pop', 12345,), # No queue provided
('pop', 12345, 'queue'), # No worker provided
('pop', 12345, 'queue', 'worker'), # No count provided
('pop', 12345, 'queue', 'worker', 'number'), # Malformed count
])
def test_basic(self):
'''Pop some jobs in a simple way'''
# If the queue is empty, you get no jobs
self.assertEqual(self.lua('pop', 0, 'queue', 'worker', 10), {})
# With job put, we can get one back
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.assertEqual(self.lua('pop', 1, 'queue', 'worker', 1), [{
'data': '{}',
'dependencies': {},
'dependents': {},
'expires': 61,
'failure': {},
'history': [{'q': 'queue', 'what': 'put', 'when': 0},
{'what': 'popped', 'when': 1, 'worker': 'worker'}],
'jid': 'jid',
'klass': 'klass',
'priority': 0,
'queue': 'queue',
'remaining': 5,
'retries': 5,
'state': 'running',
'tags': {},
'tracked': False,
'throttles': ['ql:q:queue'],
'worker': 'worker',
'spawned_from_jid': False}])
def test_pop_many(self):
'''We should be able to pop off many jobs'''
for jid in range(10):
self.lua('put', jid, 'worker', 'queue', jid, 'klass', {}, 0)
# This should only pop the first 7
self.assertEqual(
[job['jid'] for job in self.lua('pop', 100, 'queue', 'worker', 7)],
map(str, range(7)))
# This should only leave 3 left
self.assertEqual(
[job['jid'] for job in self.lua('pop', 100, 'queue', 'worker', 10)],
map(str, range(7, 10)))
def test_priority(self):
'''Popping should honor priority'''
        # We'll insert some jobs with different priorities
for jid in xrange(-10, 10):
self.lua(
'put', 0, 'worker', 'queue', jid, 'klass', {}, 0, 'priority', jid)
# Peek at the jobs, and they should be in the right order
jids = [job['jid'] for job in self.lua('pop', 1, 'queue', 'worker', 100)]
self.assertEqual(jids, map(str, range(9, -11, -1)))
def test_time_order(self):
'''Honor the time jobs were inserted, priority held constant'''
# Put 100 jobs on with different times
for time in xrange(100):
self.lua('put', time, 'worker', 'queue', time, 'klass', {}, 0)
jids = [job['jid'] for job in self.lua('pop', 200, 'queue', 'worker', 100)]
self.assertEqual(jids, map(str, range(100)))
def test_move(self):
'''When we move a job, it should be visible in the new, not old'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('put', 0, 'worker', 'other', 'jid', 'klass', {}, 0)
self.assertEqual(self.lua('pop', 1, 'queue', 'worker', 10), {})
self.assertEqual(self.lua('pop', 1, 'other', 'worker', 10)[0]['jid'], 'jid')
def test_max_concurrency(self):
        '''We can control the maximum number of jobs available in a queue'''
self.lua('throttle.set', 0, 'ql:q:queue', 5)
for jid in xrange(10):
self.lua('put', jid, 'worker', 'queue', jid, 'klass', {}, 0)
self.assertEqual(len(self.lua('pop', 10, 'queue', 'worker', 10)), 5)
# But as we complete the jobs, we can pop more
for jid in xrange(5):
self.lua('complete', 10, jid, 'worker', 'queue', {})
self.assertEqual(len(self.lua('pop', 10, 'queue', 'worker', 10)), 1)
def test_reduce_max_concurrency(self):
'''We can reduce max_concurrency at any time'''
        # We'll put and pop a bunch of jobs, then restrict concurrency and
# validate that jobs can't be popped until we dip below that level
for jid in xrange(100):
self.lua('put', jid, 'worker', 'queue', jid, 'klass', {}, 0)
self.lua('pop', 100, 'queue', 'worker', 10)
self.lua('throttle.set', 100, 'ql:q:queue', 5)
for jid in xrange(6):
self.assertEqual(
len(self.lua('pop', 100, 'queue', 'worker', 10)), 0)
self.lua('complete', 100, jid, 'worker', 'queue', {})
# And now we should be able to start popping jobs
self.assertEqual(
len(self.lua('pop', 100, 'queue', 'worker', 10)), 1)
def test_stalled_max_concurrency(self):
'''Stalled jobs can still be popped with max concurrency'''
self.lua('throttle.set', 0, 'ql:q:queue', 1)
self.lua('config.set', 0, 'grace-period', 0)
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0, 'retries', 5)
job = self.lua('pop', 0, 'queue', 'worker', 10)[0]
job = self.lua('pop', job['expires'] + 10, 'queue', 'worker', 10)[0]
self.assertEqual(job['jid'], 'jid')
self.assertEqual(job['remaining'], 4)
def test_fail_max_concurrency(self):
'''Failing a job makes space for a job in a queue with concurrency'''
self.lua('throttle.set', 0, 'ql:q:queue', 1)
self.lua('put', 0, 'worker', 'queue', 'a', 'klass', {}, 0)
self.lua('put', 1, 'worker', 'queue', 'b', 'klass', {}, 0)
self.lua('pop', 2, 'queue', 'worker', 10)
self.assertEqual(self.lua('throttle.locks', 3, 'ql:q:queue'), ['a'])
self.assertEqual(self.lua('throttle.pending', 4, 'ql:q:queue'), ['b'])
self.lua('fail', 5, 'a', 'worker', 'group', 'message', {})
job = self.lua('pop', 6, 'queue', 'worker', 10)[0]
self.assertEqual(job['jid'], 'b')
def test_throttled_added(self):
'''New jobs are added to throttled when at concurrency limit'''
self.lua('throttle.set', 0, 'ql:q:queue', 1)
self.lua('put', 0, 'worker', 'queue', 'jid1', 'klass', {}, 0)
self.lua('put', 1, 'worker', 'queue', 'jid2', 'klass', {}, 0)
self.lua('pop', 2, 'queue', 'worker', 2)
self.assertEqual(self.lua('throttle.locks', 3, 'ql:q:queue'), ['jid1'])
self.assertEqual(self.lua('jobs', 4, 'throttled', 'queue'), ['jid2'])
def test_throttled_removed(self):
'''Throttled jobs are removed from throttled when concurrency available'''
self.lua('throttle.set', 0, 'ql:q:queue', 1)
self.lua('put', 0, 'worker', 'queue', 'jid1', 'klass', {}, 0)
self.lua('put', 1, 'worker', 'queue', 'jid2', 'klass', {}, 0)
self.lua('pop', 2, 'queue', 'worker', 2)
self.assertEqual(self.lua('throttle.locks', 3, 'ql:q:queue'), ['jid1'])
self.assertEqual(self.lua('throttle.pending', 4, 'ql:q:queue'), ['jid2'])
self.assertEqual(self.lua('jobs', 5, 'throttled', 'queue'), ['jid2'])
self.assertEqual(self.lua('jobs', 5, 'running', 'queue'), ['jid1'])
self.lua('complete', 7, 'jid1', 'worker', 'queue', {})
self.assertEqual(self.lua('jobs', 8, 'throttled', 'queue'), [])
self.lua('pop', 10, 'queue', 'worker', 1)
        self.assertEqual(self.lua('throttle.locks', 11, 'ql:q:queue'), ['jid2'])
self.assertEqual(self.lua('throttle.pending', 12, 'ql:q:queue'), [])
self.assertEqual(self.lua('jobs', 13, 'throttled', 'queue'), [])
        self.assertEqual(self.lua('jobs', 14, 'running', 'queue'), ['jid2'])
def test_throttled_additional_put(self):
'''put should attempt to throttle the job immediately'''
self.lua('throttle.set', 0, 'ql:q:queue', 1)
self.lua('put', 0, 'worker', 'queue', 'jid1', 'klass', {}, 0)
self.lua('pop', 1, 'queue', 'worker', 1)
self.lua('put', 2, 'worker', 'queue', 'jid2', 'klass', {}, 0)
self.assertEqual(self.lua('throttle.locks', 3, 'ql:q:queue'), ['jid1'])
self.assertEqual(self.lua('jobs', 4, 'throttled', 'queue'), ['jid2'])
def test_pop_no_retry(self):
'''Pop is not retried when limit unset'''
self.lua('throttle.set', 0, 'tid1', 1)
self.lua('throttle.set', 0, 'tid2', 1)
self.lua('put', 0, 'worker', 'queue', 'jid1', 'klass', {}, 0, 'throttles', ['tid1'])
self.lua('put', 1, 'worker', 'queue', 'jid2', 'klass', {}, 0, 'throttles', ['tid1'])
self.lua('put', 2, 'worker', 'queue', 'jid3', 'klass', {}, 0, 'throttles', ['tid2'])
self.lua('put', 3, 'worker', 'queue', 'jid4', 'klass', {}, 0, 'throttles', ['tid2'])
jobs = self.lua('pop', 4, 'queue', 'worker', 2)
self.assertEqual(['jid1'], [job['jid'] for job in jobs])
self.assertEqual(self.lua('throttle.locks', 5, 'tid1'), ['jid1'])
self.assertEqual(self.lua('throttle.locks', 6, 'tid2'), [])
self.assertEqual(self.lua('throttle.pending', 7, 'tid1'), ['jid2'])
waiting_jobs = self.lua('peek', 8, 'queue', 99)
self.assertEqual([job['jid'] for job in waiting_jobs], ['jid3', 'jid4'])
def test_pop_retry(self):
'''Pop is retried when jobs get throttled'''
self.lua('config.set', 0, 'max-pop-retry', 99)
self.lua('throttle.set', 0, 'tid1', 1)
self.lua('throttle.set', 0, 'tid2', 1)
self.lua('put', 0, 'worker', 'queue', 'jid1', 'klass', {}, 0, 'throttles', ['tid1'])
self.lua('put', 1, 'worker', 'queue', 'jid2', 'klass', {}, 0, 'throttles', ['tid1'])
self.lua('put', 2, 'worker', 'queue', 'jid3', 'klass', {}, 0, 'throttles', ['tid2'])
self.lua('put', 3, 'worker', 'queue', 'jid4', 'klass', {}, 0, 'throttles', ['tid2'])
jobs = self.lua('pop', 4, 'queue', 'worker', 2)
self.assertEqual(['jid1', 'jid3'], [job['jid'] for job in jobs])
self.assertEqual(self.lua('throttle.locks', 5, 'tid1'), ['jid1'])
self.assertEqual(self.lua('throttle.locks', 6, 'tid2'), ['jid3'])
self.assertEqual(self.lua('throttle.pending', 7, 'tid1'), ['jid2'])
waiting_jobs = self.lua('peek', 8, 'queue', 99)
self.assertEqual([job['jid'] for job in waiting_jobs], ['jid4'])
def test_pop_retry_queue_config(self):
'''Pop is retried using queue limit if set'''
self.lua('config.set', 0, 'max-pop-retry', 1)
self.lua('config.set', 0, 'queue-max-pop-retry', 2)
self.lua('throttle.set', 0, 'tid1', 1)
self.lua('throttle.set', 0, 'tid2', 1)
self.lua('put', 0, 'worker', 'queue', 'jid1', 'klass', {}, 0, 'throttles', ['tid1'])
self.lua('put', 1, 'worker', 'queue', 'jid2', 'klass', {}, 0, 'throttles', ['tid1'])
self.lua('put', 2, 'worker', 'queue', 'jid3', 'klass', {}, 0, 'throttles', ['tid2'])
self.lua('put', 3, 'worker', 'queue', 'jid4', 'klass', {}, 0, 'throttles', ['tid2'])
jobs = self.lua('pop', 4, 'queue', 'worker', 2)
self.assertEqual(['jid1', 'jid3'], [job['jid'] for job in jobs])
self.assertEqual(self.lua('throttle.locks', 5, 'tid1'), ['jid1'])
self.assertEqual(self.lua('throttle.locks', 6, 'tid2'), ['jid3'])
self.assertEqual(self.lua('throttle.pending', 7, 'tid1'), ['jid2'])
def test_pop_retry_upto_limit(self):
'''Pop is retried up to limit when jobs get throttled'''
self.lua('config.set', 0, 'max-pop-retry', 2)
self.lua('throttle.set', 0, 'tid1', 1)
self.lua('throttle.set', 0, 'tid2', 1)
self.lua('put', 0, 'worker', 'queue', 'jid1', 'klass', {}, 0, 'throttles', ['tid1'])
self.lua('put', 1, 'worker', 'queue', 'jid2', 'klass', {}, 0, 'throttles', ['tid1'])
self.lua('put', 2, 'worker', 'queue', 'jid3', 'klass', {}, 0, 'throttles', ['tid1'])
self.lua('put', 3, 'worker', 'queue', 'jid4', 'klass', {}, 0, 'throttles', ['tid2'])
jobs = self.lua('pop', 4, 'queue', 'worker', 2)
self.assertEqual(['jid1'], [job['jid'] for job in jobs])
self.assertEqual(self.lua('throttle.locks', 5, 'tid1'), ['jid1'])
self.assertEqual(self.lua('throttle.locks', 6, 'tid2'), [])
self.assertEqual(self.lua('throttle.pending', 7, 'tid1'), ['jid2', 'jid3'])
waiting_jobs = self.lua('peek', 8, 'queue', 99)
self.assertEqual([job['jid'] for job in waiting_jobs], ['jid4'])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from collections import OrderedDict
from snisi_core.indicators import Indicator
from snisi_core.models.Reporting import ExpectedReporting, ReportClass
from snisi_core.models.Periods import MonthPeriod
from snisi_core.models.Entities import Entity
from snisi_nutrition.models.URENI import AggURENINutritionR, URENINutritionR
from snisi_nutrition.models.URENAS import AggURENASNutritionR, URENASNutritionR
from snisi_nutrition.models.URENAM import AggURENAMNutritionR, URENAMNutritionR
from snisi_nutrition.models.Stocks import AggNutritionStocksR, NutritionStocksR
from snisi_nutrition.models.Monthly import AggNutritionR, NutritionR
from snisi_nutrition.models.Caseload import ExpectedCaseload
logger = logging.getLogger(__name__)
report_classes = [
ReportClass.get_or_none("nutrition_monthly_routine"),
ReportClass.get_or_none("nutrition_monthly_routine_aggregated")]
sort_by_name = lambda x: x.name
def compute_sum_value(entity, periods, field, sub_report=None):
rcls = NutritionR if entity.type.slug == 'health_center' else AggNutritionR
reports = rcls.objects.filter(
entity=entity, period__in=periods)
def get(sub_report, field):
if sub_report is None:
return sum([getattr(r, field, 0) for r in reports])
return sum([getattr(getattr(r, '{}_report'.format(sub_report)),
field, 0) for r in reports])
return get(sub_report, field)
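# Illustrative calls (field names are taken from the callers further down;
# entity and periods are whatever the caller already holds):
#
#   compute_sum_value(entity=entity, periods=periods,
#                     field='sam_comp_new_cases')
#   compute_sum_value(entity=entity, periods=periods,
#                     field='comp_new_cases', sub_report='ureni')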
def generate_sum_data_for(entity, periods):
is_hc = False
if entity.type.slug == 'health_center':
is_hc = True
rcls = NutritionR
urenamrcls = URENAMNutritionR
urenasrcls = URENASNutritionR
urenircls = URENINutritionR
stocksrcls = NutritionStocksR
else:
urenamrcls = AggURENAMNutritionR
urenasrcls = AggURENASNutritionR
urenircls = AggURENINutritionR
stocksrcls = AggNutritionStocksR
rcls = NutritionR if entity.type.slug == 'health_center' else AggNutritionR
reports = rcls.objects.filter(
entity=entity, period__in=periods)
def get(sub_report, field):
if sub_report is None:
return sum([getattr(r, field, 0) for r in reports])
return sum([getattr(getattr(r, '{}_report'.format(sub_report)),
field, 0) for r in reports])
def recalc_rate(data, field_name):
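        # Rates cannot be summed across periods; recompute each one as
        # value / (total_out - not_responding), keeping whatever prefix the
        # field name carries (e.g. the 'comp_' in 'comp_healed_rate').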
x = field_name.rsplit('_', 2)
if len(x) <= 2:
prefix = ''
fname = x[0]
else:
prefix = '{}_'.format(x[0])
fname = x[1]
total_out = data['{}total_out'.format(prefix)]
not_responding = data['{}not_responding'.format(prefix)]
out_base = total_out - not_responding
try:
return data['{}{}'.format(prefix, fname)] / out_base
except ZeroDivisionError:
return 0
d = {'ureni_report': {},
'urenas_report': {},
'urenam_report': {},
'stocks_report': {},
'entity': {'slug': entity.slug, 'name': entity.name}}
if not is_hc or entity.has_ureni:
for field in urenircls.all_uren_fields():
d['ureni_report'][field] = get('ureni', field)
# recalc rates as it's not a sum
for field in urenircls.all_uren_fields():
if field.endswith('_rate'):
d['ureni_report'][field] = recalc_rate(d['ureni_report'],
field)
if not is_hc or entity.has_urenas:
for field in urenasrcls.all_uren_fields():
d['urenas_report'][field] = get('urenas', field)
# recalc rates as it's not a sum
for field in urenasrcls.all_uren_fields():
if field.endswith('_rate'):
d['urenas_report'][field] = recalc_rate(d['urenas_report'],
field)
if not is_hc or entity.has_urenam:
for field in urenamrcls.all_uren_fields():
d['urenam_report'][field] = get('urenam', field)
# recalc rates as it's not a sum
for field in urenamrcls.all_uren_fields():
if field.endswith('_rate'):
d['urenam_report'][field] = recalc_rate(d['urenam_report'],
field)
for field in stocksrcls.data_fields():
d['stocks_report'][field] = get('stocks', field)
for field in rcls.all_uren_fields():
d[field] = get(None, field)
return d
def generate_sum_data_table_for(entity, periods):
sl = ['health_area', 'region', 'cercle', 'commune', 'vfq']
entities = set([
Entity.get_or_none(exp['entity'])
for exp in ExpectedReporting.objects.filter(
period__in=periods, report_class__in=report_classes,
entity__slug__in=[
e.slug for e in
entity.casted().get_natural_children(sl)]).values('entity')])
data = {
centity.slug: generate_sum_data_for(entity=centity, periods=periods)
for centity in entities
}
data.update(generate_sum_data_for(entity=entity, periods=periods))
return data
def generate_entity_period_matrix(entity, period, months_data=None):
# TOTAL sum of all periods
is_total = isinstance(period, list)
if is_total:
periods = period
rcls = NutritionR if entity.type.slug == 'health_center' \
else AggNutritionR
reports = rcls.objects.filter(entity=entity, period__in=periods)
expecteds = ExpectedReporting.objects.filter(
entity=entity, period__in=period, report_class__in=report_classes)
expected = {}
report = {}
else:
reports = []
expecteds = ExpectedReporting.objects.filter(
entity=entity, period=period, report_class__in=report_classes)
try:
expected = expecteds.get()
report = expected.arrived_report()
except ExpectedReporting.DoesNotExist:
expected = None
report = None
def get(r, sub_report, field):
if r is None:
return 0
if not is_total:
if sub_report is None:
return getattr(r, field, 0) or 0
return getattr(getattr(r, '{}_report'.format(sub_report)),
field, 0) or 0
if sub_report is None:
return sum([getattr(report, field, 0) or 0 for report in reports])
return sum([getattr(getattr(report, '{}_report'.format(sub_report)),
field, 0) or 0 for report in reports])
def pc(a, b):
try:
return a / b
except ZeroDivisionError:
return 0
def rpc(data, field):
return pc(data["{}__num".format(field)],
data["{}__denum".format(field)])
def gc(value, slug):
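        # Map a computed rate onto a display class using the thresholds below
        # (e.g. healed >= 75% is good, deceased < 10% is good, a URENI share
        # of new cases between 10% and 20% is good); unknown slugs yield an
        # empty class.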
blank = Indicator.BLANK
good = Indicator.GOOD
warning = Indicator.WARNING
bad = Indicator.BAD
if slug == 'caseload':
return blank if value >= 0.5 else bad
elif slug == 'healed':
return good if value >= 0.75 else bad
elif slug == 'deceased':
return good if value < 0.10 else bad
elif slug == 'abandon':
return good if value < 0.15 else bad
elif slug == 'ureni':
return good if value >= 0.10 and value <= 0.20 else bad
elif slug == 'urenas':
return good if value >= 0.80 and value <= 0.90 else bad
return ''
if not expecteds.count() or (not is_total and expected is None):
return {'expected': None}
def perf_indic_denum(report, prefix, field):
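        # Denominator of a performance rate: total_out - not_responding read
        # from the matching sub-report, or, when building the TOTAL column,
        # the sum of the per-month denominators already stored in months_data.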
if report is None:
return 0
if is_total:
return sum([data.get('{}{}_rate__denum'.format(prefix, field), 0)
for data in months_data.values()])
if 'sam_ureni_' in prefix:
report = report.ureni_report
prefix = prefix.replace('sam_ureni_comp_', 'comp_') \
.replace('sam_ureni_', '')
if 'sam_urenas_' in prefix:
report = report.urenas_report
prefix = prefix.replace('sam_urenas_comp_', 'comp_') \
.replace('sam_urenas_', '')
tof = '{}total_out'.format(prefix) \
if prefix is not None else 'total_out'
nof = '{}not_responding'.format(prefix) \
if prefix is not None else 'not_responding'
if isinstance(report, dict):
dtof = report.get(tof, 0)
dnof = report.get(nof, 0)
else:
dtof = getattr(report, tof, 0)
dnof = getattr(report, nof, 0)
return dtof - dnof
def perf_indic_num(report, prefix, field):
if report is None:
return 0
if is_total:
return sum([data.get('{}{}_rate__num'.format(prefix, field), 0)
for data in months_data.values()])
if 'sam_ureni_' in prefix:
report = report.ureni_report
prefix = prefix.replace('sam_ureni_comp_', 'comp_') \
.replace('sam_ureni_', '')
if 'sam_urenas_' in prefix:
report = report.urenas_report
prefix = prefix.replace('sam_urenas_comp_', 'comp_') \
.replace('sam_urenas_', '')
f = '{}{}'.format(prefix, field) \
if prefix != 'all' else '{}'.format(field)
if isinstance(report, dict):
            d = report.get(f)
else:
d = getattr(report, f)
return d
data = {
'expected': expected,
'report': report,
'nb_expected': get(report, None, 'nb_source_reports_expected'),
'nb_arrived': get(report, None, 'nb_source_reports_arrived'),
}
data['ureni_nb_expected'] = get(report, 'ureni',
'nb_source_reports_expected')
data['ureni_nb_arrived'] = get(report, 'ureni',
'nb_source_reports_arrived')
data['ureni_completion_rate__num'] = data['ureni_nb_arrived']
data['ureni_completion_rate__denum'] = data['ureni_nb_expected']
data['ureni_completion_rate'] = rpc(data, 'ureni_completion_rate')
data['urenas_nb_expected'] = get(report, 'urenas',
'nb_source_reports_expected')
data['urenas_nb_arrived'] = get(report, 'urenas',
'nb_source_reports_arrived')
data['urenas_completion_rate__num'] = data['urenas_nb_arrived']
data['urenas_completion_rate__denum'] = data['urenas_nb_expected']
data['urenas_completion_rate'] = rpc(data, 'urenas_completion_rate')
data['urenam_nb_expected'] = get(report, 'urenam',
'nb_source_reports_expected')
data['urenam_nb_arrived'] = get(report, 'urenam',
'nb_source_reports_arrived')
data['urenam_completion_rate__num'] = data['urenam_nb_arrived']
data['urenam_completion_rate__denum'] = data['urenam_nb_expected']
data['urenam_completion_rate'] = rpc(data, 'urenam_completion_rate')
data['sam_comp_new_cases'] = get(report, None, 'sam_comp_new_cases')
data['sam_comp_caseload_expected'] = get_caseload_expected_for(
period=period, entity=entity, uren='sam')
data['sam_comp_caseload_treated_rate__num'] = data['sam_comp_new_cases']
data['sam_comp_caseload_treated_rate__denum'] = \
data['sam_comp_caseload_expected']
data['sam_comp_caseload_treated_rate'] = rpc(
data, 'sam_comp_caseload_treated_rate')
data['sam_comp_caseload_treated_rate_class'] = gc(
data['sam_comp_caseload_treated_rate'], 'caseload')
# ureni only
data['sam_ureni_comp_healed'] = get(report, 'ureni', 'comp_healed')
data['sam_ureni_comp_abandon'] = get(report, 'ureni', 'comp_abandon')
data['sam_ureni_comp_deceased'] = get(report, 'ureni', 'comp_deceased')
data['sam_ureni_comp_out_base'] = get(report, 'ureni', 'comp_out_base')
data['sam_ureni_comp_healed_rate__num'] = \
perf_indic_num(report, 'sam_ureni_comp_', 'healed')
data['sam_ureni_comp_healed_rate__denum'] = \
perf_indic_denum(report, 'sam_ureni_comp_', 'healed')
data['sam_ureni_comp_healed_rate'] = rpc(
data, 'sam_ureni_comp_healed_rate')
data['sam_ureni_comp_healed_rate_class'] = gc(
data['sam_ureni_comp_healed_rate'], 'healed')
data['sam_ureni_comp_abandon_rate__num'] = \
perf_indic_num(report, 'sam_ureni_comp_', 'abandon')
data['sam_ureni_comp_abandon_rate__denum'] = \
perf_indic_denum(report, 'sam_ureni_comp_', 'abandon')
data['sam_ureni_comp_abandon_rate'] = rpc(
data, 'sam_ureni_comp_abandon_rate')
data['sam_ureni_comp_abandon_rate_class'] = gc(
data['sam_ureni_comp_abandon_rate'], 'abandon')
data['sam_ureni_comp_deceased_rate__num'] = \
perf_indic_num(report, 'sam_ureni_comp_', 'deceased')
data['sam_ureni_comp_deceased_rate__denum'] = \
perf_indic_denum(report, 'sam_ureni_comp_', 'deceased')
data['sam_ureni_comp_deceased_rate'] = rpc(
data, 'sam_ureni_comp_deceased_rate')
data['sam_ureni_comp_deceased_rate_class'] = gc(
data['sam_ureni_comp_deceased_rate'], 'deceased')
####
    # urenas only
data['sam_urenas_comp_healed'] = get(report, 'urenas', 'comp_healed')
data['sam_urenas_comp_abandon'] = get(report, 'urenas', 'comp_abandon')
data['sam_urenas_comp_deceased'] = get(report, 'urenas', 'comp_deceased')
data['sam_urenas_comp_out_base'] = get(report, 'urenas', 'comp_out_base')
data['sam_urenas_comp_healed_rate__num'] = \
perf_indic_num(report, 'sam_urenas_comp_', 'healed')
data['sam_urenas_comp_healed_rate__denum'] = \
perf_indic_denum(report, 'sam_urenas_comp_', 'healed')
data['sam_urenas_comp_healed_rate'] = rpc(
data, 'sam_urenas_comp_healed_rate')
data['sam_urenas_comp_healed_rate_class'] = gc(
data['sam_urenas_comp_healed_rate'], 'healed')
data['sam_urenas_comp_abandon_rate__num'] = \
perf_indic_num(report, 'sam_urenas_comp_', 'abandon')
data['sam_urenas_comp_abandon_rate__denum'] = \
perf_indic_denum(report, 'sam_urenas_comp_', 'abandon')
data['sam_urenas_comp_abandon_rate'] = rpc(
data, 'sam_urenas_comp_abandon_rate')
data['sam_urenas_comp_abandon_rate_class'] = gc(
data['sam_urenas_comp_abandon_rate'], 'abandon')
data['sam_urenas_comp_deceased_rate__num'] = \
perf_indic_num(report, 'sam_urenas_comp_', 'deceased')
data['sam_urenas_comp_deceased_rate__denum'] = \
perf_indic_denum(report, 'sam_urenas_comp_', 'deceased')
data['sam_urenas_comp_deceased_rate'] = rpc(
data, 'sam_urenas_comp_deceased_rate')
data['sam_urenas_comp_deceased_rate_class'] = gc(
data['sam_urenas_comp_deceased_rate'], 'deceased')
####
data['sam_ureni_comp_new_cases'] = get(report, 'ureni', 'comp_new_cases')
data['sam_urenas_comp_new_cases'] = get(report, 'urenas', 'comp_new_cases')
data['sam_comp_new_cases'] = get(report, None, 'sam_comp_new_cases')
data['sam_ureni_comp_new_cases_rate__num'] = \
data['sam_ureni_comp_new_cases']
data['sam_ureni_comp_new_cases_rate__denum'] = \
data['sam_comp_new_cases']
data['sam_ureni_comp_new_cases_rate'] = rpc(
data, 'sam_ureni_comp_new_cases_rate')
data['sam_ureni_comp_new_cases_rate_class'] = gc(
data['sam_ureni_comp_new_cases_rate'], 'ureni')
data['sam_urenas_comp_new_cases_rate__num'] = \
data['sam_urenas_comp_new_cases']
data['sam_urenas_comp_new_cases_rate__denum'] = \
data['sam_comp_new_cases']
data['sam_urenas_comp_new_cases_rate'] = rpc(
data, 'sam_urenas_comp_new_cases_rate')
data['sam_urenas_comp_new_cases_rate_class'] = gc(
data['sam_urenas_comp_new_cases_rate'], 'urenas')
data['sam_comp_healed'] = get(report, None, 'sam_comp_healed')
data['sam_comp_abandon'] = get(report, None, 'sam_comp_abandon')
data['sam_comp_deceased'] = get(report, None, 'sam_comp_deceased')
data['sam_comp_out_base'] = get(report, None, 'sam_comp_out_base')
data['sam_comp_healed_rate__num'] = \
perf_indic_num(report, 'sam_comp_', 'healed')
data['sam_comp_healed_rate__denum'] = \
perf_indic_denum(report, 'sam_comp_', 'healed')
data['sam_comp_healed_rate'] = rpc(data, 'sam_comp_healed_rate')
data['sam_comp_healed_rate_class'] = gc(
data['sam_comp_healed_rate'], 'healed')
data['sam_comp_abandon_rate__num'] = \
perf_indic_num(report, 'sam_comp_', 'abandon')
data['sam_comp_abandon_rate__denum'] = \
perf_indic_denum(report, 'sam_comp_', 'abandon')
data['sam_comp_abandon_rate'] = rpc(data, 'sam_comp_abandon_rate')
data['sam_comp_abandon_rate_class'] = gc(
data['sam_comp_abandon_rate'], 'abandon')
data['sam_comp_deceased_rate__num'] = \
perf_indic_num(report, 'sam_comp_', 'deceased')
data['sam_comp_deceased_rate__denum'] = \
perf_indic_denum(report, 'sam_comp_', 'deceased')
data['sam_comp_deceased_rate'] = rpc(data, 'sam_comp_deceased_rate')
data['sam_comp_deceased_rate_class'] = gc(
data['sam_comp_deceased_rate'], 'deceased')
data['mam_comp_new_cases'] = get(report, None, 'mam_comp_new_cases')
data['mam_comp_caseload_expected'] = get_caseload_expected_for(
period=period, entity=entity, uren='mam')
data['mam_comp_caseload_treated_rate__num'] = data['mam_comp_new_cases']
data['mam_comp_caseload_treated_rate__denum'] = \
data['mam_comp_caseload_expected']
data['mam_comp_caseload_treated_rate'] = rpc(
data, 'mam_comp_caseload_treated_rate')
data['mam_comp_caseload_treated_rate_class'] = gc(
data['mam_comp_caseload_treated_rate'], 'caseload')
data['mam_comp_healed'] = get(report, None, 'mam_comp_healed')
data['mam_comp_abandon'] = get(report, None, 'mam_comp_abandon')
data['mam_comp_deceased'] = get(report, None, 'mam_comp_deceased')
data['mam_comp_out_base'] = get(report, None, 'mam_comp_out_base')
data['mam_comp_healed_rate__num'] = \
perf_indic_num(report, 'mam_comp_', 'healed')
data['mam_comp_healed_rate__denum'] = \
perf_indic_denum(report, 'mam_comp_', 'healed')
data['mam_comp_healed_rate'] = rpc(data, 'mam_comp_healed_rate')
data['mam_comp_healed_rate_class'] = gc(
data['mam_comp_healed_rate'], 'healed')
data['mam_comp_abandon_rate__num'] = \
perf_indic_num(report, 'mam_comp_', 'abandon')
data['mam_comp_abandon_rate__denum'] = \
perf_indic_denum(report, 'mam_comp_', 'abandon')
data['mam_comp_abandon_rate'] = rpc(data, 'mam_comp_abandon_rate')
data['mam_comp_abandon_rate_class'] = gc(
data['mam_comp_abandon_rate'], 'abandon')
data['mam_comp_deceased_rate__num'] = \
perf_indic_num(report, 'mam_comp_', 'deceased')
data['mam_comp_deceased_rate__denum'] = \
perf_indic_denum(report, 'mam_comp_', 'deceased')
data['mam_comp_deceased_rate'] = rpc(data, 'mam_comp_deceased_rate')
data['mam_comp_deceased_rate_class'] = gc(
data['mam_comp_deceased_rate'], 'deceased')
return data
def generate_entity_periods_matrix(entity, periods):
data = {
'entity': {'slug': entity.slug, 'name': entity.name},
'periods': OrderedDict([
(period, generate_entity_period_matrix(entity, period))
for period in sorted(periods, key=lambda x: x.start_on)
])
}
data['periods']["TOTAL"] = \
generate_entity_period_matrix(entity, periods, data['periods'])
return data
def generate_entities_periods_matrix(entity, periods):
return OrderedDict([
(centity.slug, generate_entity_periods_matrix(centity, periods))
for centity in sorted(entity.get_health_children(), key=sort_by_name)
] + [
(entity.slug, generate_entity_periods_matrix(entity, periods))
])
def get_caseload_completion_for(period, entity, uren):
periods = MonthPeriod.all_from(
MonthPeriod.find_create_from(period.middle().year, 1, 1), period)
field = '{}_comp_new_cases'.format(uren)
return compute_sum_value(entity=entity, periods=periods, field=field)
def get_caseload_expected_for(period, entity, uren):
if isinstance(period, list):
period = period[-1]
return getattr(ExpectedCaseload.get_or_none_from(
year=period.start_on.year,
entity_slug=entity.slug), 'u59o6_{}'.format(uren), 0)
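# Illustrative sketch of how the helpers above are typically combined
# (the entity lookup and the period range are hypothetical):
#
#   entity = Entity.get_or_none('some-district-slug')
#   periods = MonthPeriod.all_from(
#       MonthPeriod.find_create_from(2015, 1, 1),
#       MonthPeriod.find_create_from(2015, 6, 1))
#   matrix = generate_entities_periods_matrix(entity, periods)
#   totals = generate_sum_data_table_for(entity, periods)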
|
|
""" Class for automated creation of Makefiles """
import os
class Makefile(object):
""" Class for building a Makefile """
def __init__(self, src=None):
self.load(src)
def load(self, src=None):
""" Reinitialize all class members """
self.lines = []
self.source = []
self.compiler = "gcc"
self.libs = []
self.cflags = "-Wall"
self.lflags = ""
self.oflags = "-DNDEBUG -O2"
self.pflags = ""
self.dflags = "-DDEBUG"
self.objpath = ""
self.name = "prog"
self.preset = "Default GCC"
self.pkgconfig = ""
        if src is not None:
if 'objpath' in src.keys():
self.objpath = src['objpath']
if 'compiler' in src.keys():
self.compiler = src['compiler']
if 'name' in src.keys():
self.name = src['name']
if 'cflags' in src.keys():
self.cflags = src['cflags']
if 'lflags' in src.keys():
self.lflags = src['lflags']
if 'oflags' in src.keys():
self.oflags = src['oflags']
if 'pflags' in src.keys():
self.pflags = src['pflags']
if 'dflags' in src.keys():
self.dflags = src['dflags']
if 'source' in src.keys():
self.source = src['source']
if 'libs' in src.keys():
self.libs = src['libs']
if 'pkgconfig' in src.keys():
self.pkgconfig = src['pkgconfig']
def get_properties(self):
""" Returns the Makefile properties as an dictionary """
pdc = {"name":self.name, "compiler":self.compiler}
if self.objpath != "":
pdc.update({"objpath":self.objpath})
if self.cflags != "":
pdc.update({"cflags":self.cflags})
if self.lflags != "":
pdc.update({"lflags":self.lflags})
if self.oflags != "":
pdc.update({"oflags":self.oflags})
if self.dflags != "":
pdc.update({"dflags":self.dflags})
if self.pflags != "":
pdc.update({"pflags":self.pflags})
if self.libs != "":
pdc.update({"libs":self.libs})
if self.pkgconfig != "":
pdc.update({"pkgconfig":self.pkgconfig})
pdc.update({"source":self.source})
return pdc
def add_header(self):
""" Adds the file header """
self.lines.append("#")
self.lines.append("# Automatic generated Makefile by pymake")
self.lines.append("#")
self.lines.append("# Preset: %s" % self.preset)
self.lines.append("#")
def get_compiler_statement(self, name):
""" Returns the compiler statement """
line = "$(CC) $(CF) -c %s" % name
if self.pkgconfig != "":
line += " `pkg-config --cflags $(PKGCFG)`"
if self.objpath != "":
line += " -o %s" % self.get_obj_name(name)
return line
def get_linker_statement(self):
""" Gets the final linker statement """
if len(self.libs) > 0:
line = "$(CC) $(LF) $(OBJ) -o $(NAME) $(LIBS)"
else:
line = "$(CC) $(LF) $(OBJ) -o $(NAME)"
if self.pkgconfig != "":
line += " `pkg-config --libs $(PKGCFG)`"
return line
def get_obj_name(self, fname):
""" Returns the object name of source file """
fname = os.path.basename(fname)
if fname.endswith(".c"):
fname = fname.replace(".c", ".o")
elif fname.endswith(".cpp"):
fname = fname.replace(".cpp", ".o")
if self.objpath != "":
fname = self.objpath + '/' + fname
return fname
def get_libs(self):
""" Returns the library objects as command line argument for the compiler """
result = ""
for lib in self.libs:
result += "-l%s " % lib
return result[0:len(result)-1]
def add_objects(self):
""" Adds the list of objects to compile """
self.lines.append("## Object file list")
line = "OBJ = "
for src in self.source:
obj = self.get_obj_name(src)
line += obj + ' '
if len(line) > 80:
line += '\\'
self.lines.append(line)
line = '\t'
self.lines.append(line[0:len(line)-1])
def add_new_line(self):
""" Adds a simple new line to the output """
self.lines.append("")
def add_debug_target(self):
""" Adds the debug target to the output """
self.lines.append("debug: CF = -g $(DFLAGS) $(CFLAGS)")
self.lines.append("debug: LF = -g")
self.lines.append("debug: $(NAME)")
def add_release_target(self):
""" Adds the release target to the output """
self.lines.append("release: CF = $(CFLAGS) $(OFLAGS)")
self.lines.append("release: LF = -s")
self.lines.append("release: $(NAME)")
def add_clean_target(self):
""" Adds the clean target """
self.lines.append("clean:")
self.lines.append("\trm $(OBJ)")
def add_phony_targets(self):
""" Adds the phony targets to the Makefile """
self.lines.append(".PHONY: clean")
def add_linker_statement(self):
""" Adds the final linker statement to the output """
self.lines.append("$(NAME): $(OBJ)")
self.lines.append("\t%s" % self.get_linker_statement())
def add_compiler_statements(self):
""" Adds compiler statements for all source modules """
for src in self.source:
obj = self.get_obj_name(src)
self.lines.append("%s: %s" % (obj, src))
self.lines.append("\t%s" % self.get_compiler_statement(src))
def add_variables(self):
""" Adds the variable section to the output """
self.add_new_line()
self.lines.append("## General variables")
self.lines.append("NAME = %s" % self.name)
self.add_new_line()
self.lines.append("## Compiler and flags")
self.lines.append("CC = %s" % self.compiler)
if self.cflags != "":
self.lines.append("CFLAGS = %s" % self.cflags)
if self.lflags != "":
self.lines.append("LFLAGS = %s" % self.lflags)
if self.dflags != "":
self.lines.append("DFLAGS = %s" % self.dflags)
if self.oflags != "":
self.lines.append("OFLAGS = %s" % self.oflags)
if self.pflags != "":
self.lines.append("PFLAGS = %s" % self.pflags)
if self.pkgconfig != "":
self.lines.append("PKGCFG = %s" % self.pkgconfig)
if len(self.libs) > 0:
self.lines.append("LIBS = %s" % self.get_libs())
def __str__(self):
return '\n'.join(self.lines)
def scan(self, root='.', recursive=False):
""" Scans the given directory for source files """
if recursive:
for path, subdirs, files in os.walk(root):
for name in files:
fname = os.path.join(path, name)
if fname.endswith(".c") or fname.endswith(".cpp") or fname.endswith(".cc"):
self.source.append(fname)
else:
for fname in os.listdir(root):
if fname.endswith(".c") or fname.endswith(".cpp") or fname.endswith(".cc"):
self.source.append(fname)
def write(self, fname):
""" Writes the current content to the output file """
with open(fname, "wt") as outf:
outf.write(str(self))
def build(self):
""" Builds the whole file structure """
self.add_header()
self.add_new_line()
self.add_variables()
self.add_new_line()
self.add_objects()
self.add_new_line()
self.add_release_target()
self.add_new_line()
self.add_debug_target()
self.add_new_line()
self.add_linker_statement()
self.add_new_line()
self.add_compiler_statements()
self.add_new_line()
self.add_clean_target()
self.add_new_line()
self.add_phony_targets()
|
|
import json
import uuid
import datetime
import os
import socket
class jsonprocesser:
def __init__(self):
self.client_mac = str(hex(uuid.getnode()))
self.filestamp = datetime.datetime.now().strftime("%H-%M_%d-%m-%y")
self.timestamp = str(datetime.datetime.utcnow())
print self.timestamp
#filename = client_mac + timestamp + '.json'
self.filename = os.path.abspath('results/' + self.client_mac + self.filestamp + '.json')
data = json.dumps({"UserInfo":{"user id":self.client_mac,"timestamp":self.timestamp,"ip":"null","lat":0,"lon":0},
"SpeedTests":{"TCP":{"upload":-1,"download":-1},
"UDP":{"download":{"4k":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"1080p":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"720p":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"420p":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1}},
"upload":{"screensharing":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"standard_video_calling":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"hd_video_calling":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1}},
"2way":{"high_VOIP":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1,"latency":-1},
"low_VOIP":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1,"latency":-1},
"gaming":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1,"latency":-1}}}},
"TRACEROUTE":{}})
jsonFile = open(self.filename, "w+")
jsonFile.write(data)
print self.filename
def json_update_tcp(self, iperf_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["TCP"]["upload"] = iperf_results["tcp_upload"]
data["SpeedTests"]["TCP"]["download"] = iperf_results['tcp_download']
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_4k(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["download"]["4k"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["download"]["4k"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["download"]["4k"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_1080p(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["download"]["1080p"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["download"]["1080p"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["download"]["1080p"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_720p(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["download"]["720p"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["download"]["720p"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["download"]["720p"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_480p(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["download"]["420p"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["download"]["420p"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["download"]["420p"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_screensharing(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["upload"]["screensharing"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["upload"]["screensharing"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["upload"]["screensharing"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_standard_video_calling(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["upload"]["standard_video_calling"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["upload"]["standard_video_calling"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["upload"]["standard_video_calling"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_hd_video_calling(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["upload"]["hd_video_calling"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["upload"]["hd_video_calling"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["upload"]["hd_video_calling"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_high_VOIP(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["2way"]["high_VOIP"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["2way"]["high_VOIP"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["2way"]["high_VOIP"]["jitter_iat"] = udp_results["jitter_iat"]
data["SpeedTests"]["UDP"]["2way"]["high_VOIP"]["latency"] = udp_results["latency"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_low_VOIP(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["2way"]["low_VOIP"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["2way"]["low_VOIP"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["2way"]["low_VOIP"]["jitter_iat"] = udp_results["jitter_iat"]
data["SpeedTests"]["UDP"]["2way"]["low_VOIP"]["latency"] = udp_results["latency"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_gaming(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["2way"]["gaming"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["2way"]["gaming"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["2way"]["gaming"]["jitter_iat"] = udp_results["jitter_iat"]
data["SpeedTests"]["UDP"]["2way"]["gaming"]["latency"] = udp_results["latency"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_upload(self,server_ip,port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = open(self.filename,'rb')
l = f.read(1024)
print l
s.connect((server_ip,port))
while(l):
s.send(l)
l= f.read(1024)
f.close()
#s.shutdown(socket.SHUT_WR)
        s.close()
def print_json(self):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
print data
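
# Minimal usage sketch (illustrative: the results values, server address and
# port are hypothetical, and a local 'results/' directory must already exist):
#
#   jp = jsonprocesser()
#   jp.json_update_tcp({'tcp_upload': 94.2, 'tcp_download': 187.5})
#   jp.json_update_gaming({'PLR': 0.01, 'jitter_lat': 2.5,
#                          'jitter_iat': 1.8, 'latency': 35})
#   jp.json_upload('192.0.2.10', 5005)
#   jp.print_json()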
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extract UserMetrics "actions" strings from the Chrome source.
This program generates the list of known actions we expect to see in the
user behavior logs. It walks the Chrome source, looking for calls to
UserMetrics functions, extracting actions and warning on improper calls,
as well as generating the lists of possible actions in situations where
there are many possible actions.
See also:
content/browser/user_metrics.h
http://wiki.corp.google.com/twiki/bin/view/Main/ChromeUserExperienceMetrics
If run with a "--hash" argument, chromeactions.txt will be updated.
"""
__author__ = 'evanm (Evan Martin)'
import hashlib
from HTMLParser import HTMLParser
import os
import re
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..', '..', 'tools', 'python'))
from google import path_utils
# Files that are known to use content::RecordComputedAction(), which means
# they require special handling code in this script.
# To add a new file, add it to this list and add the appropriate logic to
# generate the known actions to AddComputedActions() below.
KNOWN_COMPUTED_USERS = (
'back_forward_menu_model.cc',
'options_page_view.cc',
'render_view_host.cc', # called using webkit identifiers
'user_metrics.cc', # method definition
'new_tab_ui.cc', # most visited clicks 1-9
'extension_metrics_module.cc', # extensions hook for user metrics
'safe_browsing_blocking_page.cc', # various interstitial types and actions
'language_options_handler_common.cc', # languages and input methods in CrOS
'cros_language_options_handler.cc', # languages and input methods in CrOS
'about_flags.cc', # do not generate a warning; see AddAboutFlagsActions()
'external_metrics.cc', # see AddChromeOSActions()
'core_options_handler.cc', # see AddWebUIActions()
'browser_render_process_host.cc' # see AddRendererActions()
)
# Language codes used in Chrome. The list should be updated when a new
# language is added to app/l10n_util.cc, as follows:
#
# % (cat app/l10n_util.cc | \
# perl -n0e 'print $1 if /kAcceptLanguageList.*?\{(.*?)\}/s' | \
# perl -nle 'print $1, if /"(.*)"/'; echo 'es-419') | \
# sort | perl -pe "s/(.*)\n/'\$1', /" | \
# fold -w75 -s | perl -pe 's/^/ /;s/ $//'; echo
#
# The script extracts language codes from kAcceptLanguageList, but es-419
# (Spanish in Latin America) is an exception.
LANGUAGE_CODES = (
'af', 'am', 'ar', 'az', 'be', 'bg', 'bh', 'bn', 'br', 'bs', 'ca', 'co',
'cs', 'cy', 'da', 'de', 'de-AT', 'de-CH', 'de-DE', 'el', 'en', 'en-AU',
'en-CA', 'en-GB', 'en-NZ', 'en-US', 'en-ZA', 'eo', 'es', 'es-419', 'et',
'eu', 'fa', 'fi', 'fil', 'fo', 'fr', 'fr-CA', 'fr-CH', 'fr-FR', 'fy',
'ga', 'gd', 'gl', 'gn', 'gu', 'ha', 'haw', 'he', 'hi', 'hr', 'hu', 'hy',
'ia', 'id', 'is', 'it', 'it-CH', 'it-IT', 'ja', 'jw', 'ka', 'kk', 'km',
'kn', 'ko', 'ku', 'ky', 'la', 'ln', 'lo', 'lt', 'lv', 'mk', 'ml', 'mn',
'mo', 'mr', 'ms', 'mt', 'nb', 'ne', 'nl', 'nn', 'no', 'oc', 'om', 'or',
'pa', 'pl', 'ps', 'pt', 'pt-BR', 'pt-PT', 'qu', 'rm', 'ro', 'ru', 'sd',
'sh', 'si', 'sk', 'sl', 'sn', 'so', 'sq', 'sr', 'st', 'su', 'sv', 'sw',
'ta', 'te', 'tg', 'th', 'ti', 'tk', 'to', 'tr', 'tt', 'tw', 'ug', 'uk',
'ur', 'uz', 'vi', 'xh', 'yi', 'yo', 'zh', 'zh-CN', 'zh-TW', 'zu',
)
# Input method IDs used in Chrome OS. The list should be updated when a
# new input method is added to
# chrome/browser/chromeos/input_method/input_methods.txt in the Chrome tree, as
# follows:
#
# % sort chrome/browser/chromeos/input_method/input_methods.txt | \
# perl -ne "print \"'\$1', \" if /^([^#]+?)\s/" | \
# fold -w75 -s | perl -pe 's/^/ /;s/ $//'; echo
#
# The script extracts input method IDs from input_methods.txt.
INPUT_METHOD_IDS = (
'english-m', 'm17n:am:sera', 'm17n:ar:kbd', 'm17n:bn:itrans',
'm17n:fa:isiri', 'm17n:gu:itrans', 'm17n:hi:itrans', 'm17n:kn:itrans',
'm17n:ml:itrans', 'm17n:mr:itrans', 'm17n:ta:inscript', 'm17n:ta:itrans',
'm17n:ta:phonetic', 'm17n:ta:tamil99', 'm17n:ta:typewriter',
'm17n:te:itrans', 'm17n:th:kesmanee', 'm17n:th:pattachote',
'm17n:th:tis820', 'm17n:vi:tcvn', 'm17n:vi:telex', 'm17n:vi:viqr',
'm17n:vi:vni', 'm17n:zh:cangjie', 'm17n:zh:quick', 'mozc', 'mozc-chewing',
'mozc-dv', 'mozc-hangul', 'mozc-jp', 'pinyin', 'pinyin-dv', 'xkb:be::fra',
'xkb:be::ger', 'xkb:be::nld', 'xkb:bg::bul', 'xkb:bg:phonetic:bul',
'xkb:br::por', 'xkb:ca::fra', 'xkb:ca:eng:eng', 'xkb:ch::ger',
'xkb:ch:fr:fra', 'xkb:cz::cze', 'xkb:de::ger', 'xkb:de:neo:ger',
'xkb:dk::dan', 'xkb:ee::est', 'xkb:es::spa', 'xkb:es:cat:cat',
'xkb:fi::fin', 'xkb:fr::fra', 'xkb:gb:dvorak:eng', 'xkb:gb:extd:eng',
'xkb:gr::gre', 'xkb:hr::scr', 'xkb:hu::hun', 'xkb:il::heb', 'xkb:it::ita',
'xkb:jp::jpn', 'xkb:kr:kr104:kor', 'xkb:latam::spa', 'xkb:lt::lit',
'xkb:lv:apostrophe:lav', 'xkb:no::nob', 'xkb:pl::pol', 'xkb:pt::por',
'xkb:ro::rum', 'xkb:rs::srp', 'xkb:ru::rus', 'xkb:ru:phonetic:rus',
'xkb:se::swe', 'xkb:si::slv', 'xkb:sk::slo', 'xkb:tr::tur', 'xkb:ua::ukr',
'xkb:us::eng', 'xkb:us:altgr-intl:eng', 'xkb:us:colemak:eng',
'xkb:us:dvorak:eng', 'xkb:us:intl:eng',
)
number_of_files_total = 0
def AddComputedActions(actions):
"""Add computed actions to the actions list.
Arguments:
actions: set of actions to add to.
"""
# Actions for back_forward_menu_model.cc.
for dir in ('BackMenu_', 'ForwardMenu_'):
actions.add(dir + 'ShowFullHistory')
actions.add(dir + 'Popup')
for i in range(1, 20):
actions.add(dir + 'HistoryClick' + str(i))
actions.add(dir + 'ChapterClick' + str(i))
# Actions for new_tab_ui.cc.
for i in range(1, 10):
actions.add('MostVisited%d' % i)
# Actions for safe_browsing_blocking_page.cc.
for interstitial in ('Phishing', 'Malware', 'Multiple'):
for action in ('Show', 'Proceed', 'DontProceed', 'ForcedDontProceed'):
actions.add('SBInterstitial%s%s' % (interstitial, action))
# Actions for language_options_handler.cc (Chrome OS specific).
for input_method_id in INPUT_METHOD_IDS:
actions.add('LanguageOptions_DisableInputMethod_%s' % input_method_id)
actions.add('LanguageOptions_EnableInputMethod_%s' % input_method_id)
actions.add('InputMethodOptions_Open_%s' % input_method_id)
for language_code in LANGUAGE_CODES:
actions.add('LanguageOptions_UiLanguageChange_%s' % language_code)
actions.add('LanguageOptions_SpellCheckLanguageChange_%s' % language_code)
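# For reference, the loops above generate action strings such as
# 'BackMenu_HistoryClick1', 'MostVisited3', 'SBInterstitialMalwareProceed',
# 'LanguageOptions_EnableInputMethod_mozc' and
# 'LanguageOptions_UiLanguageChange_fr' -- one entry per combination.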
def AddWebKitEditorActions(actions):
"""Add editor actions from editor_client_impl.cc.
Arguments:
actions: set of actions to add to.
"""
action_re = re.compile(r'''\{ [\w']+, +\w+, +"(.*)" +\},''')
editor_file = os.path.join(path_utils.ScriptDir(), '..', '..', 'webkit',
'api', 'src','EditorClientImpl.cc')
for line in open(editor_file):
match = action_re.search(line)
if match: # Plain call to RecordAction
actions.add(match.group(1))
def AddClosedSourceActions(actions):
"""Add actions that are in code which is not checked out by default
Arguments
actions: set of actions to add to.
"""
actions.add('PDF.PrintPage')
actions.add('PDF.FitToHeightButton')
actions.add('PDF.FitToWidthButton')
actions.add('PDF.LoadFailure')
actions.add('PDF.LoadSuccess')
actions.add('PDF.PreviewDocumentLoadFailure')
actions.add('PDF.ZoomFromBrowser')
actions.add('PDF.ZoomOutButton')
actions.add('PDF.ZoomInButton')
actions.add('PDF_Unsupported_Rights_Management')
actions.add('PDF_Unsupported_XFA')
actions.add('PDF_Unsupported_3D')
actions.add('PDF_Unsupported_Movie')
actions.add('PDF_Unsupported_Sound')
actions.add('PDF_Unsupported_Screen')
actions.add('PDF_Unsupported_Portfolios_Packages')
actions.add('PDF_Unsupported_Attachment')
actions.add('PDF_Unsupported_Digital_Signature')
actions.add('PDF_Unsupported_Shared_Review')
actions.add('PDF_Unsupported_Shared_Form')
actions.add('PDF_Unsupported_Bookmarks')
def AddAndroidActions(actions):
"""Add actions that are used by Chrome on Android.
Arguments:
actions: set of actions to add to.
"""
actions.add('MobileBeamCallbackSuccess')
actions.add('MobileBeamInvalidAppState')
actions.add('MobileBreakpadUploadAttempt')
actions.add('MobileBreakpadUploadFailure')
actions.add('MobileBreakpadUploadSuccess')
actions.add('MobileContextMenuCopyImageLinkAddress')
actions.add('MobileContextMenuCopyLinkAddress')
actions.add('MobileContextMenuCopyLinkText')
actions.add('MobileContextMenuImage')
actions.add('MobileContextMenuLink')
actions.add('MobileContextMenuOpenImageInNewTab')
actions.add('MobileContextMenuOpenLink')
actions.add('MobileContextMenuOpenLinkInIncognito')
actions.add('MobileContextMenuOpenLinkInNewTab')
actions.add('MobileContextMenuSaveImage')
actions.add('MobileContextMenuShareLink')
actions.add('MobileContextMenuText')
actions.add('MobileContextMenuViewImage')
actions.add('MobileFreAttemptSignIn')
actions.add('MobileFreSignInSuccessful')
actions.add('MobileFreSkipSignIn')
actions.add('MobileMenuAddToBookmarks')
actions.add('MobileMenuAllBookmarks')
actions.add('MobileMenuBack')
actions.add('MobileMenuCloseAllTabs')
actions.add('MobileMenuCloseTab')
actions.add('MobileMenuFeedback')
actions.add('MobileMenuFindInPage')
actions.add('MobileMenuForward')
actions.add('MobileMenuFullscreen')
actions.add('MobileMenuNewIncognitoTab')
actions.add('MobileMenuNewTab')
actions.add('MobileMenuOpenTabs')
actions.add('MobileMenuQuit')
actions.add('MobileMenuReload')
actions.add('MobileMenuSettings')
actions.add('MobileMenuShare')
actions.add('MobileMenuShow')
actions.add('MobileNTPBookmark')
actions.add('MobileNTPForeignSession')
actions.add('MobileNTPMostVisited')
actions.add('MobileNTPSwitchToBookmarks')
actions.add('MobileNTPSwitchToIncognito')
actions.add('MobileNTPSwitchToMostVisited')
actions.add('MobileNTPSwitchToOpenTabs')
actions.add('MobileNewTabOpened')
actions.add('MobileOmniboxSearch')
actions.add('MobileOmniboxVoiceSearch')
actions.add('MobilePageLoaded')
actions.add('MobilePageLoadedDesktopUserAgent')
actions.add('MobilePageLoadedWithKeyboard')
actions.add('MobileReceivedExternalIntent')
actions.add('MobileRendererCrashed')
actions.add('MobileShortcutAllBookmarks')
actions.add('MobileShortcutFindInPage')
actions.add('MobileShortcutNewIncognitoTab')
actions.add('MobileShortcutNewTab')
actions.add('MobileSideSwipeFinished')
actions.add('MobileStackViewCloseTab')
actions.add('MobileStackViewSwipeCloseTab')
actions.add('MobileTabClobbered')
actions.add('MobileTabClosed')
actions.add('MobileTabStripCloseTab')
actions.add('MobileTabStripNewTab')
actions.add('MobileTabSwitched')
actions.add('MobileToolbarBack')
actions.add('MobileToolbarForward')
actions.add('MobileToolbarReload')
actions.add('MobileToolbarShowMenu')
actions.add('MobileToolbarShowStackView')
actions.add('MobileToolbarStackViewNewTab')
actions.add('MobileToolbarToggleBookmark')
actions.add('SystemBack')
actions.add('SystemBackForNavigation')
def AddAboutFlagsActions(actions):
"""This parses the experimental feature flags for UMA actions.
Arguments:
actions: set of actions to add to.
"""
about_flags = os.path.join(path_utils.ScriptDir(), '..', 'browser',
'about_flags.cc')
flag_name_re = re.compile(r'\s*"([0-9a-zA-Z\-_]+)",\s*// FLAGS:RECORD_UMA')
for line in open(about_flags):
match = flag_name_re.search(line)
if match:
actions.add("AboutFlags_" + match.group(1))
# If the line contains the marker but was not matched by the regex, print a
# warning if the line is not a comment.
elif 'FLAGS:RECORD_UMA' in line and line[0:2] != '//':
print >>sys.stderr, 'WARNING: This line is marked for recording ' + \
'about:flags metrics, but is not in the proper format:\n' + line
def AddChromeOSActions(actions):
"""Add actions reported by non-Chrome processes in Chrome OS.
Arguments:
actions: set of actions to add to.
"""
# Actions sent by Chrome OS update engine.
actions.add('Updater.ServerCertificateChanged')
actions.add('Updater.ServerCertificateFailed')
# Actions sent by Chrome OS cryptohome.
actions.add('Cryptohome.PKCS11InitFail')
def AddExtensionActions(actions):
"""Add actions reported by extensions via chrome.metricsPrivate API.
Arguments:
actions: set of actions to add to.
"""
# Actions sent by Chrome OS File Browser.
actions.add('FileBrowser.CreateNewFolder')
actions.add('FileBrowser.PhotoEditor.Edit')
actions.add('FileBrowser.PhotoEditor.View')
def GrepForActions(path, actions):
"""Grep a source file for calls to UserMetrics functions.
Arguments:
path: path to the file
actions: set of actions to add to
"""
global number_of_files_total
number_of_files_total = number_of_files_total + 1
# we look for the UserMetricsAction structure constructor
# this should be on one line
action_re = re.compile(r'[^a-zA-Z]UserMetricsAction\("([^"]*)')
malformed_action_re = re.compile(r'[^a-zA-Z]UserMetricsAction\([^"]')
computed_action_re = re.compile(r'RecordComputedAction')
line_number = 0
for line in open(path):
line_number = line_number + 1
match = action_re.search(line)
if match: # Plain call to RecordAction
actions.add(match.group(1))
elif malformed_action_re.search(line):
# Warn if this line is using RecordAction incorrectly.
print >>sys.stderr, ('WARNING: %s has malformed call to RecordAction'
' at %d' % (path, line_number))
elif computed_action_re.search(line):
# Warn if this file shouldn't be calling RecordComputedAction.
if os.path.basename(path) not in KNOWN_COMPUTED_USERS:
print >>sys.stderr, ('WARNING: %s has RecordComputedAction at %d' %
(path, line_number))
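# Illustrative sketch (not called by this script): the kinds of source lines
# GrepForActions() reacts to. The C++ snippets below are hypothetical examples.
def _ExampleGrepForActionsLines():
  """Returns (extracted_action, is_malformed) for two sample source lines."""
  action_re = re.compile(r'[^a-zA-Z]UserMetricsAction\("([^"]*)')
  malformed_action_re = re.compile(r'[^a-zA-Z]UserMetricsAction\([^"]')
  good = 'content::RecordAction(UserMetricsAction("AboutChrome"));'
  bad = 'content::RecordAction(UserMetricsAction(kSomeConstant));'
  # The first yields the action 'AboutChrome'; the second triggers the
  # malformed-call warning because the argument is not a string literal.
  return action_re.search(good).group(1), bool(malformed_action_re.search(bad))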
class WebUIActionsParser(HTMLParser):
"""Parses an HTML file, looking for all tags with a 'metric' attribute.
Adds user actions corresponding to any metrics found.
Arguments:
actions: set of actions to add to
"""
def __init__(self, actions):
HTMLParser.__init__(self)
self.actions = actions
def handle_starttag(self, tag, attrs):
# We only care to examine tags that have a 'metric' attribute.
attrs = dict(attrs)
if not 'metric' in attrs:
return
# Boolean metrics have two corresponding actions. All other metrics have
# just one corresponding action. By default, we check the 'dataType'
# attribute.
is_boolean = ('dataType' in attrs and attrs['dataType'] == 'boolean')
if 'type' in attrs and attrs['type'] in ('checkbox', 'radio'):
if attrs['type'] == 'checkbox':
is_boolean = True
else:
# Radio buttons are boolean if and only if their values are 'true' or
# 'false'.
assert(attrs['type'] == 'radio')
if 'value' in attrs and attrs['value'] in ['true', 'false']:
is_boolean = True
if is_boolean:
self.actions.add(attrs['metric'] + '_Enable')
self.actions.add(attrs['metric'] + '_Disable')
else:
self.actions.add(attrs['metric'])
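# Illustrative sketch (not called by this script): how WebUIActionsParser maps
# 'metric' attributes to user actions. The HTML snippet below is hypothetical.
def _ExampleWebUIActionsParse():
  """Feeds two sample tags through the parser and returns the action set."""
  actions = set()
  parser = WebUIActionsParser(actions)
  parser.feed('<input type="checkbox" metric="HomePageIsNewTabPage">'
              '<button metric="ClearBrowsingData">')
  parser.close()
  # The checkbox is boolean, so it yields an _Enable/_Disable pair; the button
  # yields a single action:
  # set(['HomePageIsNewTabPage_Enable', 'HomePageIsNewTabPage_Disable',
  #      'ClearBrowsingData'])
  return actions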
def GrepForWebUIActions(path, actions):
"""Grep a WebUI source file for elements with associated metrics.
Arguments:
path: path to the file
actions: set of actions to add to
"""
try:
parser = WebUIActionsParser(actions)
parser.feed(open(path).read())
except Exception, e:
print "Error encountered for path %s" % path
raise e
finally:
parser.close()
def WalkDirectory(root_path, actions, extensions, callback):
for path, dirs, files in os.walk(root_path):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for file in files:
ext = os.path.splitext(file)[1]
if ext in extensions:
callback(os.path.join(path, file), actions)
def GrepForRendererActions(path, actions):
"""Grep a source file for calls to RenderThread::RecordUserMetrics.
Arguments:
path: path to the file
actions: set of actions to add to
"""
# We look for the ViewHostMsg_UserMetricsRecordAction constructor.
# This should be on one line.
action_re = re.compile(
r'[^a-zA-Z]RenderThread::RecordUserMetrics\("([^"]*)')
line_number = 0
for line in open(path):
match = action_re.search(line)
if match: # Plain call to RecordAction
actions.add(match.group(1))
def AddLiteralActions(actions):
"""Add literal actions specified via calls to UserMetrics functions.
Arguments:
actions: set of actions to add to.
"""
EXTENSIONS = ('.cc', '.mm', '.c', '.m')
# Walk the source tree to process all .cc files.
chrome_root = os.path.normpath(os.path.join(path_utils.ScriptDir(), '..'))
WalkDirectory(chrome_root, actions, EXTENSIONS, GrepForActions)
content_root = os.path.normpath(os.path.join(path_utils.ScriptDir(),
'..', '..', 'content'))
WalkDirectory(content_root, actions, EXTENSIONS, GrepForActions)
webkit_root = os.path.normpath(os.path.join(path_utils.ScriptDir(),
'..', '..', 'webkit'))
WalkDirectory(os.path.join(webkit_root, 'glue'), actions, EXTENSIONS,
GrepForActions)
WalkDirectory(os.path.join(webkit_root, 'port'), actions, EXTENSIONS,
GrepForActions)
def AddWebUIActions(actions):
"""Add user actions defined in WebUI files.
Arguments:
actions: set of actions to add to.
"""
resources_root = os.path.join(path_utils.ScriptDir(), '..', 'browser',
'resources')
WalkDirectory(resources_root, actions, ('.html',), GrepForWebUIActions)
def AddRendererActions(actions):
"""Add user actions sent via calls to RenderThread::RecordUserMetrics.
Arguments:
actions: set of actions to add to.
"""
EXTENSIONS = ('.cc', '.mm', '.c', '.m')
chrome_renderer_root = os.path.join(path_utils.ScriptDir(), '..', 'renderer')
content_renderer_root = os.path.join(path_utils.ScriptDir(), '..', '..',
'content', 'renderer')
WalkDirectory(chrome_renderer_root, actions, EXTENSIONS,
GrepForRendererActions)
WalkDirectory(content_renderer_root, actions, EXTENSIONS,
GrepForRendererActions)
def main(argv):
if '--hash' in argv:
hash_output = True
else:
hash_output = False
print >>sys.stderr, "WARNING: If you added new UMA tags, you must" + \
" use the --hash option to update chromeactions.txt."
# if we do a hash output, we want to only append NEW actions, and we know
# the file we want to work on
actions = set()
chromeactions_path = os.path.join(path_utils.ScriptDir(), "chromeactions.txt")
if hash_output:
f = open(chromeactions_path)
for line in f:
part = line.rpartition("\t")
part = part[2].strip()
actions.add(part)
f.close()
AddComputedActions(actions)
# TODO(fmantek): bring back webkit editor actions.
# AddWebKitEditorActions(actions)
AddAboutFlagsActions(actions)
AddWebUIActions(actions)
AddRendererActions(actions)
AddLiteralActions(actions)
# print "Scanned {0} number of files".format(number_of_files_total)
# print "Found {0} entries".format(len(actions))
AddClosedSourceActions(actions)
AddChromeOSActions(actions)
AddExtensionActions(actions)
AddAndroidActions(actions)
if hash_output:
f = open(chromeactions_path, "w")
# Print out the actions as a sorted list.
for action in sorted(actions):
if hash_output:
hash = hashlib.md5()
hash.update(action)
print >>f, '0x%s\t%s' % (hash.hexdigest()[:16], action)
else:
print action
if hash_output:
print "Done. Do not forget to add chromeactions.txt to your changelist"
return 0
if '__main__' == __name__:
sys.exit(main(sys.argv))
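# Typical invocations (the script name is shown as a placeholder):
#   python <this_script>.py          # print the discovered actions to stdout
#   python <this_script>.py --hash   # rewrite chromeactions.txt with hashed entries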
|
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import re
import maya.OpenMaya
import maya.cmds
import IECore
import IECoreMaya
import _IECoreMaya
from FnDagNode import FnDagNode
import StringUtil
## A function set for operating on the IECoreMaya::SceneShape type.
class FnSceneShape( maya.OpenMaya.MFnDependencyNode ) :
## Initialise the function set for the given procedural object, which may
# either be an MObject or a node name in string or unicode form.
def __init__( self, object ) :
if isinstance( object, str ) or isinstance( object, unicode ) :
object = StringUtil.dependencyNodeFromString( object )
maya.OpenMaya.MFnDependencyNode.__init__( self, object )
## Creates a new node under a transform of the specified name. Returns a function set instance operating on this new node.
@staticmethod
def create( parentName ) :
try:
parentNode = maya.cmds.createNode( "transform", name=parentName, skipSelect=True )
except:
# The parent name is expected to be a child name from the sceneInterface; child names can be purely numeric, which Maya rejects as node names, so fall back to a prefix.
parentNode = maya.cmds.createNode( "transform", name="sceneShape_"+parentName, skipSelect=True )
return FnSceneShape.createShape( parentNode )
## Create a scene shape under the given node. Returns a function set instance operating on this shape.
@staticmethod
def createShape( parentNode ) :
parentShort = parentNode.rpartition( "|" )[-1]
numbersMatch = re.search( "[0-9]+$", parentShort )
if numbersMatch is not None :
numbers = numbersMatch.group()
shapeName = parentShort[:-len(numbers)] + "SceneShape" + numbers
else :
shapeName = parentShort + "SceneShape"
shapeNode = maya.cmds.createNode( "ieSceneShape", name=shapeName, parent=parentNode, skipSelect=True )
fnScS = FnSceneShape( shapeNode )
maya.cmds.sets( fnScS.fullPathName(), add="initialShadingGroup" )
maya.cmds.setAttr( fnScS.fullPathName()+".objectOnly", l=True )
maya.cmds.connectAttr( "time1.outTime", fnScS.fullPathName()+'.time' )
return fnScS
## Returns a set of the names of any currently selected components.
def selectedComponentNames( self ) :
result = set()
s = maya.OpenMaya.MSelectionList()
maya.OpenMaya.MGlobal.getActiveSelectionList( s )
allComponents = self.componentNames()
fullPathName = self.fullPathName()
for i in range( 0, s.length() ) :
try :
p = maya.OpenMaya.MDagPath()
c = maya.OpenMaya.MObject()
s.getDagPath( i, p, c )
if p.node()==self.object() :
fnC = maya.OpenMaya.MFnSingleIndexedComponent( c )
a = maya.OpenMaya.MIntArray()
fnC.getElements( a )
for j in range( 0, a.length() ) :
result.add( allComponents[ a[j] ] )
except :
pass
return result
## Selects the components specified by the passed names. If replace is True
# then the current selection is deselected first.
def selectComponentNames( self, componentNames ) :
if not isinstance( componentNames, set ) :
componentNames = set( componentNames )
fullPathName = self.fullPathName()
allNames = self.componentNames()
toSelect = []
for i, name in enumerate( allNames ):
if name in componentNames:
toSelect.append( fullPathName + ".f[" + str( i ) + "]" )
maya.cmds.select( clear=True )
maya.cmds.selectMode( component=True )
maya.cmds.hilite( fullPathName )
for s in toSelect :
maya.cmds.select( s, add=True )
## Returns the full path name to this node.
def fullPathName( self ) :
try :
f = maya.OpenMaya.MFnDagNode( self.object() )
return f.fullPathName()
except :
pass
return self.name()
def sceneInterface( self ) :
return _IECoreMaya._sceneShapeSceneInterface( self )
def componentNames( self ) :
return _IECoreMaya._sceneShapeComponentNames( self )
## Returns True if the scene shape can be expanded.
# We assume that if the objectOnly flag is on, it means the scene shape has already been expanded so return False.
# Can only be expanded if the scene interface for the scene shape has children.
def canBeExpanded( self ) :
# An already expanded scene should have objectOnly on
if not maya.cmds.getAttr( self.fullPathName()+".objectOnly" ):
# Check if you have any children to expand to
if self.sceneInterface().childNames():
return True
return False
## Returns True if the scene shape can be collapsed.
# We assume that if the objectOnly flag is off, the scene shape is already collapsed.
def canBeCollapsed( self ) :
# if already collapsed, objectOnly is off
return maya.cmds.getAttr( self.fullPathName()+".objectOnly" )
## Returns the index in the queryPaths which matches the given path.
# If the path isn't already in the queries, add it and return the new index.
def __queryIndexForPath( self, path ):
node = self.fullPathName()
index = None
validIndices = maya.cmds.getAttr( node+".queryPaths", mi=True )
if not validIndices:
index = 0
else:
for id in validIndices:
# Check if we can reuse a query path
if maya.cmds.getAttr( node+".queryPaths["+str(id)+"]" ) == path:
index = id
break
if index is None:
# Didn't find path, get the next available index
index = max( i for i in validIndices ) +1
maya.cmds.setAttr( node+".queryPaths["+str(index)+"]", path, type="string" )
return index
## Expands the scene shape one level down if possible.
# Returns a list of function sets for the child scene shapes.
# Missing child transforms and shapes will be created, missing connections and attribute values will be reset.
def expandOnce( self ) :
node = self.fullPathName()
transform = maya.cmds.listRelatives( node, parent=True, f=True )[0]
scene = self.sceneInterface()
if not scene:
return []
sceneChildren = scene.childNames()
if sceneChildren == []:
# No children to expand to
return []
sceneFile = maya.cmds.getAttr( node+".file" )
sceneRoot = maya.cmds.getAttr( node+".root" )
maya.cmds.setAttr( node+".querySpace", 1 )
maya.cmds.setAttr( node+".objectOnly", l=False )
maya.cmds.setAttr( node+".objectOnly", 1 )
maya.cmds.setAttr( node+".objectOnly", l=True )
drawGeo = maya.cmds.getAttr( node+".drawGeometry" )
drawChildBounds = maya.cmds.getAttr( node+".drawChildBounds" )
drawRootBound = maya.cmds.getAttr( node+".drawRootBound" )
drawTagsFilter = maya.cmds.getAttr( node+".drawTagsFilter" )
newSceneShapeFns = []
for i, child in enumerate( sceneChildren ):
if maya.cmds.objExists( transform+"|"+child ):
shape = maya.cmds.listRelatives( transform+"|"+child, f=True, type="ieSceneShape" )
if shape:
fnChild = IECoreMaya.FnSceneShape( shape[0] )
else:
fnChild = IECoreMaya.FnSceneShape.createShape( transform+"|"+child )
else:
fnChild = IECoreMaya.FnSceneShape.create( child )
childNode = fnChild.fullPathName()
childTransform = maya.cmds.listRelatives( childNode, parent=True, f=True )[0]
maya.cmds.setAttr( childNode+".file", sceneFile, type="string" )
sceneRootName = "/"+child if sceneRoot == "/" else sceneRoot+"/"+child
maya.cmds.setAttr( childNode+".root", sceneRootName, type="string" )
index = self.__queryIndexForPath( "/"+child )
outTransform = node+".outTransform["+str(index)+"]"
if not maya.cmds.isConnected( outTransform+".outTranslate", childTransform+".translate" ):
maya.cmds.connectAttr( outTransform+".outTranslate", childTransform+".translate", f=True )
if not maya.cmds.isConnected( outTransform+".outRotate", childTransform+".rotate" ):
maya.cmds.connectAttr( outTransform+".outRotate", childTransform+".rotate", f=True )
if not maya.cmds.isConnected( outTransform+".outScale", childTransform+".scale" ):
maya.cmds.connectAttr( outTransform+".outScale", childTransform+".scale", f=True )
maya.cmds.setAttr( childNode+".drawGeometry", drawGeo )
maya.cmds.setAttr( childNode+".drawChildBounds", drawChildBounds )
maya.cmds.setAttr( childNode+".drawRootBound", drawRootBound )
if drawTagsFilter:
parentTags = drawTagsFilter.split()
childTags = fnChild.sceneInterface().readTags()
commonTags = filter( lambda x: str(x) in childTags, parentTags )
if not commonTags:
# Hide that child since it doesn't match any filter
maya.cmds.setAttr( childTransform+".visibility", 0 )
else:
maya.cmds.setAttr( childNode+".drawTagsFilter", " ".join(commonTags),type="string" )
if maya.cmds.listRelatives( childTransform, parent = True, f=True ) != [ transform ]:
maya.cmds.parent( childTransform, transform, relative=True )
newSceneShapeFns.append( fnChild )
return newSceneShapeFns
## Recursively expands all levels starting from the scene shape.
# Returns a list of function sets for all the child scene shapes.
def expandAll( self ):
newFn = []
def recursiveExpand( fnSceneShape ):
new = fnSceneShape.expandOnce()
newFn.extend( new )
for n in new:
recursiveExpand( n )
recursiveExpand( self )
return newFn
## Collapses all children up to this scene shape.
def collapse( self ) :
node = self.fullPathName()
transform = maya.cmds.listRelatives( node, parent=True, f=True )[0]
allTransformChildren = maya.cmds.listRelatives( transform, f=True, type = "transform" ) or []
for child in allTransformChildren:
# Do a bunch of tests first!
maya.cmds.delete( child )
maya.cmds.setAttr( node+".objectOnly", l=False )
maya.cmds.setAttr( node+".objectOnly", 0 )
maya.cmds.setAttr( node+".objectOnly", l=True )
maya.cmds.setAttr( node+".intermediateObject", 0 )
## Returns tuple of maya type and input plug name that match the object in the scene interface, by checking the objectType tags.
# Returns (None, None) if no object in the scene interface or the object isn't compatible with maya geometry we can create.
def __mayaCompatibleShapeAndPlug( self ) :
result = (None, None)
if self.sceneInterface().hasObject():
tags = self.sceneInterface().readTags( includeChildren=False )
if "ObjectType:MeshPrimitive" in tags:
result = ( "mesh", "inMesh" )
elif "ObjectType:CurvesPrimitive" in tags:
result = ( "nurbsCurve", "create" )
elif "ObjectType:CoordinateSystem" in tags:
result = ( "locator", "localPosition" )
return result
## Recursively converts all objects in the scene interface to compatible maya geometry
# All scene shape nodes in the hierarchy are turned into an intermediate object.
def convertAllToGeometry( self ) :
# Expand scene first, then for each scene shape we turn them into an intermediate object and connect a mesh
self.expandAll()
transform = maya.cmds.listRelatives( self.fullPathName(), parent=True, f=True )[0]
allSceneShapes = maya.cmds.listRelatives( transform, ad=True, f=True, type="ieSceneShape" )
for sceneShape in allSceneShapes:
maya.cmds.setAttr( sceneShape+".querySpace", 1 )
fn = FnSceneShape( sceneShape )
if fn.sceneInterface() and fn.sceneInterface().hasObject():
fn.convertObjectToGeometry()
# turn the scene shape node into an intermediateObject so it can't be seen by MayaScene
maya.cmds.setAttr( sceneShape+".intermediateObject", 1 )
## Converts the object (if any) in the scene interface into maya geometry.
# If a shape with the expected name but incompatible type is found under the transform, we rename it and create a new proper shape.
# The shape is connected to the scene shape object output only if it isn't already connected or locked.
# transformNode parameter can be used to specify the parent of the geometry. If None, uses the transform of the scene shape.
def convertObjectToGeometry( self, transformNode = None ):
if not self.sceneInterface().hasObject():
return
node = self.fullPathName()
if not transformNode:
# No transform provided, use the transform of the reader
transformNode = maya.cmds.listRelatives( node, f=True, p=True )[0]
type, plug = self.__mayaCompatibleShapeAndPlug()
if not (type and plug):
raise Exception, "Scene interface at %s cannot be converted to Maya geometry." % self.sceneInterface().pathAsString()
shapeName = IECoreMaya.FnDagNode.defaultShapeName( transformNode )
shape = transformNode + "|" + shapeName
create = False
if not maya.cmds.objExists( shape ):
create = True
elif maya.cmds.nodeType( shape ) != type:
# Rename existing shape
newName = shapeName + "_orig"
maya.cmds.rename( shape, newName )
IECore.msg( IECore.Msg.Level.Warning, "FnSceneShape.convertObjectToGeometry", "Renaming incompatible shape %s to %s." % ( shape, newName ) )
create = True
if create:
maya.cmds.createNode( type, parent = transformNode, name = shapeName )
if type == "mesh":
maya.cmds.sets(shape, add="initialShadingGroup" )
index = self.__queryIndexForPath( "/" )
if not maya.cmds.listConnections( shape+"."+plug, source = True, destination = False ) and not maya.cmds.getAttr( shape+"."+plug, l=True ):
maya.cmds.connectAttr( node+'.outObjects['+str(index)+']', shape+"."+plug, f=True )
if type == "mesh":
object = self.sceneInterface().readObject(0.0)
interpolation = object.interpolation
try:
IECoreMaya.ToMayaMeshConverter.setMeshInterpolationAttribute( shape, interpolation )
except:
IECore.msg( IECore.Msg.Level.Warning, "FnSceneShape.convertObjectToGeometry", "Failed to set interpolation on %s." % shape )
## Returns the maya node type that this function set operates on
@classmethod
def _mayaNodeType( cls ):
return "ieSceneShape"
|
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2010 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import math
from math import pi, atan
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos import euclid
import pyglet
from pyglet.gl import *
from pyglet.window import key
def circle(radius, color):
circumference = 2*math.pi*radius
step_size = 5
steps = max(4, int(circumference/step_size))
adelta = 2*math.pi/steps
points = [0,0,radius,0]
for step in range(1,steps+1):
x = radius*math.cos(step*adelta)
y = radius*math.sin(step*adelta)
points += [x,y]
num_points = steps+2
vertex_list = pyglet.graphics.vertex_list(num_points,
('v2f', points),
('c4B', list(color)*num_points)
)
return vertex_list
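# Worked example of the tessellation above (numbers purely illustrative):
# for radius=30 the circumference is ~188.5, so steps = max(4, int(188.5/5)) = 37,
# and the triangle fan is built from steps+2 = 39 vertices (the centre, the
# first rim point, and one rim point per step, the last closing the loop).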
def rectangle(x1, y1, x2, y2, color):
return pyglet.graphics.vertex_list(4,
('v2f', [x1, y1, x2, y1, x2, y2, x1, y2]),
('c4B', color*4)
)
def up_triange(x,y, h, w, color):
return pyglet.graphics.vertex_list(3,
('v2f', [x, y, x-w/2, y+h, x+w/2, y+h]),
('c4B', color*3)
)
def down_triange(x,y, h, w, color):
return pyglet.graphics.vertex_list(3,
('v2f', [x, y, x-w/2, y-h, x+w/2, y-h]),
('c4B', color*3)
)
class Widget(cocos.cocosnode.CocosNode):
def __init__(self):
super(Widget, self).__init__()
self.selected = False
self.hovered = False
def set_hover(self, value):
self.hovered = value
def set_selected(self, position):
pass
def on_dragged(self, dx, dy):
self.x += dx
self.y += dy
def is_mouse_over(self, position):
return False
class BallWidget(Widget):
def __init__(self, radius, color):
super(BallWidget, self).__init__()
self.radius = radius
self.color = color
self.body = circle(radius, color)
self.hover_envelope = circle(radius*1.2, (255,255,0,100))
self.selected_envelope = circle(radius*1.5, (255,255,255,200))
def draw(self):
glPushMatrix()
self.transform()
if self.selected:
self.selected_envelope.draw(GL_TRIANGLE_FAN)
elif self.hovered:
self.hover_envelope.draw(GL_TRIANGLE_FAN)
self.body.draw(GL_TRIANGLE_FAN)
glPopMatrix()
def is_mouse_over(self, position):
px, py = position
x, y = self.position
if (px-x)**2+(py-y)**2 < self.radius**2:
return True
return False
class UILayer(cocos.layer.Layer):
is_event_handler = True
def __init__(self):
super(UILayer, self).__init__()
self.hovering = None
self.hovering_all = []
self.mouse_down = False
self.dragging = False
def on_mouse_motion(self, x, y, dx, dy):
selected = None
self.hovering_all = []
for c in self.get_children():
if isinstance(c, Widget):
if c.is_mouse_over((x,y)):
selected = c
self.hovering_all.append( c )
c.set_hover(False)
if selected:
if self.hovering not in self.hovering_all:
selected.set_hover(True)
self.hovering = selected
else:
self.hovering.set_hover(True)
else:
self.hovering = None
def on_mouse_press(self, *args):
self.mouse_down = True
def on_mouse_release(self, *args):
self.mouse_down = False
self.dragging = False
def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
self.dragging = True
if self.hovering:
self.hovering.on_dragged(dx,dy)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
if self.hovering_all and not self.mouse_down:
top = self.hovering_all.pop(0)
self.hovering_all.append(top)
self.hovering.set_hover(False)
self.hovering = self.hovering_all[0]
self.hovering.set_hover(True)
class TimelineModel(object):
def get_markers(self):
pass
def get_duration(self):
pass
def get_position(self):
pass
class TimeLine(Widget):
def __init__(self, model):
super(TimeLine, self).__init__()
self.model = model
x, y = director.get_window_size()
self.x_margin = xm = 20
self.y_margin = ym = 20
self.height = h = 10
self.width = x-2*xm
self.color = 125,0,0,125
self.bar = rectangle( xm, y-ym, x-xm, y-ym-h, self.color)
def draw(self):
# draw bar
self.bar.draw(GL_QUADS)
# draw ticks
d = self.model.get_duration()
if d != 0:
step = 2** ( int(math.log(d, 2)-2) )
p = 0
while p <= d:
self.show_tick( p )
p += step
markers = self.model.get_markers()
markers_pxs = [ self.map_to_pixel(m) for m in markers ]
x, y = director.get_window_size()
ym = self.y_margin
h = self.height
for pixel in markers_pxs:
t = up_triange(pixel, y-ym-h/2, 10, 10, (100,100,255,255))
t.draw(GL_TRIANGLES)
pixel = self.map_to_pixel( self.model.get_position() )
t = down_triange(pixel, y-ym-h/2, 10, 10, (255,255,0,255))
t.draw(GL_TRIANGLES)
def map_to_pixel(self, when):
d = self.model.get_duration()
xm = self.x_margin
if d == 0:
return xm
w = self.width
p = (when/d) * w
return xm + p
def show_tick(self, when):
l = self.height + 5
x,y = director.get_window_size()
ym = self.y_margin
p = self.map_to_pixel( when )
# draw line
glColor4ub(128, 128, 128,100)
glLineWidth(1)
glBegin(GL_LINES)
glVertex2f( p, y-ym )
glVertex2f( p, y-ym-l )
glEnd()
# draw label
label = pyglet.text.Label(str(when),
font_name='Monotype',
#font_name='Times New Roman',
font_size=8,
x=p, y=y-ym-l-7,
anchor_x='center', anchor_y='center')
label.draw()
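# Illustrative sketch (not part of the original module): TimelineModel above is
# only an interface; TimeLine just calls get_markers(), get_duration() and
# get_position(). A minimal concrete model could look like this. With a
# duration of 10, the default 20 px margins and an 800 px wide window,
# map_to_pixel(5) lands at 20 + (5/10.0)*760 = 400 px.
class StaticTimelineModel(TimelineModel):
    def __init__(self, duration, markers=(), position=0.0):
        self.duration = float(duration)
        self.markers = list(markers)
        self.position = position
    def get_markers(self):
        return self.markers
    def get_duration(self):
        return self.duration
    def get_position(self):
        return self.position
# Usage sketch: TimeLine(StaticTimelineModel(10.0, markers=[2.0, 7.5], position=5.0))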
|
|
from test.test_support import (TESTFN, run_unittest, import_module, unlink,
requires, _2G, _4G)
import unittest
import os, re, itertools, socket, sys
mmap = import_module('mmap')
PAGESIZE = mmap.PAGESIZE
class MmapTests(unittest.TestCase):
def setUp(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
def tearDown(self):
try:
os.unlink(TESTFN)
except OSError:
pass
def test_basic(self):
# Test mmap module on Unix systems and Windows
# Create a file to be mmap'ed.
f = open(TESTFN, 'w+')
try:
# Write 2 pages worth of data to the file
f.write('\0'* PAGESIZE)
f.write('foo')
f.write('\0'* (PAGESIZE-3) )
f.flush()
m = mmap.mmap(f.fileno(), 2 * PAGESIZE)
f.close()
# Simple sanity checks
tp = str(type(m)) # SF bug 128713: segfaulted on Linux
self.assertEqual(m.find('foo'), PAGESIZE)
self.assertEqual(len(m), 2*PAGESIZE)
self.assertEqual(m[0], '\0')
self.assertEqual(m[0:3], '\0\0\0')
# Shouldn't crash on boundary (Issue #5292)
self.assertRaises(IndexError, m.__getitem__, len(m))
self.assertRaises(IndexError, m.__setitem__, len(m), '\0')
# Modify the file's content
m[0] = '3'
m[PAGESIZE +3: PAGESIZE +3+3] = 'bar'
# Check that the modification worked
self.assertEqual(m[0], '3')
self.assertEqual(m[0:3], '3\0\0')
self.assertEqual(m[PAGESIZE-1 : PAGESIZE + 7], '\0foobar\0')
m.flush()
# Test doing a regular expression match in an mmap'ed file
match = re.search('[A-Za-z]+', m)
if match is None:
self.fail('regex match on mmap failed!')
else:
start, end = match.span(0)
length = end - start
self.assertEqual(start, PAGESIZE)
self.assertEqual(end, PAGESIZE + 6)
# test seeking around (try to overflow the seek implementation)
m.seek(0,0)
self.assertEqual(m.tell(), 0)
m.seek(42,1)
self.assertEqual(m.tell(), 42)
m.seek(0,2)
self.assertEqual(m.tell(), len(m))
# Try to seek to negative position...
self.assertRaises(ValueError, m.seek, -1)
# Try to seek beyond end of mmap...
self.assertRaises(ValueError, m.seek, 1, 2)
# Try to seek to negative position...
self.assertRaises(ValueError, m.seek, -len(m)-1, 2)
# Try resizing map
try:
m.resize(512)
except SystemError:
# resize() not supported
# No messages are printed, since the output of this test suite
# would then be different across platforms.
pass
else:
# resize() is supported
self.assertEqual(len(m), 512)
# Check that we can no longer seek beyond the new size.
self.assertRaises(ValueError, m.seek, 513, 0)
# Check that the underlying file is truncated too
# (bug #728515)
f = open(TESTFN)
f.seek(0, 2)
self.assertEqual(f.tell(), 512)
f.close()
self.assertEqual(m.size(), 512)
m.close()
finally:
try:
f.close()
except OSError:
pass
def test_access_parameter(self):
# Test for "access" keyword parameter
mapsize = 10
with open(TESTFN, "wb") as f:
f.write("a"*mapsize)
f = open(TESTFN, "rb")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ)
self.assertEqual(m[:], 'a'*mapsize, "Readonly memory map data incorrect.")
# Ensuring that readonly mmap can't be slice assigned
try:
m[:] = 'b'*mapsize
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be item assigned
try:
m[0] = 'b'
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be write() to
try:
m.seek(0,0)
m.write('abc')
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be write_byte() to
try:
m.seek(0,0)
m.write_byte('d')
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be resized
try:
m.resize(2*mapsize)
except SystemError: # resize is not universally supported
pass
except TypeError:
pass
else:
self.fail("Able to resize readonly memory map")
f.close()
m.close()
del m, f
with open(TESTFN, "rb") as f:
self.assertEqual(f.read(), 'a'*mapsize,
"Readonly memory map data file was modified")
# Opening mmap with size too big
import sys
f = open(TESTFN, "r+b")
try:
m = mmap.mmap(f.fileno(), mapsize+1)
except ValueError:
# we do not expect a ValueError on Windows
# CAUTION: This also changes the size of the file on disk, and
# later tests assume that the length hasn't changed. We need to
# repair that.
if sys.platform.startswith('win'):
self.fail("Opening mmap with size+1 should work on Windows.")
else:
# we expect a ValueError on Unix, but not on Windows
if not sys.platform.startswith('win'):
self.fail("Opening mmap with size+1 should raise ValueError.")
m.close()
f.close()
if sys.platform.startswith('win'):
# Repair damage from the resizing test.
f = open(TESTFN, 'r+b')
f.truncate(mapsize)
f.close()
# Opening mmap with access=ACCESS_WRITE
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_WRITE)
# Modifying write-through memory map
m[:] = 'c'*mapsize
self.assertEqual(m[:], 'c'*mapsize,
"Write-through memory map memory not updated properly.")
m.flush()
m.close()
f.close()
f = open(TESTFN, 'rb')
stuff = f.read()
f.close()
self.assertEqual(stuff, 'c'*mapsize,
"Write-through memory map data file not updated properly.")
# Opening mmap with access=ACCESS_COPY
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_COPY)
# Modifying copy-on-write memory map
m[:] = 'd'*mapsize
self.assertEqual(m[:], 'd' * mapsize,
"Copy-on-write memory map data not written correctly.")
m.flush()
f.close()
with open(TESTFN, "rb") as f:
self.assertEqual(f.read(), 'c'*mapsize,
"Copy-on-write test data file should not be modified.")
# Ensuring copy-on-write maps cannot be resized
self.assertRaises(TypeError, m.resize, 2*mapsize)
m.close()
del m, f
# Ensuring invalid access parameter raises exception
f = open(TESTFN, "r+b")
self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize, access=4)
f.close()
if os.name == "posix":
# Try incompatible flags, prot and access parameters.
f = open(TESTFN, "r+b")
self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize,
flags=mmap.MAP_PRIVATE,
prot=mmap.PROT_READ, access=mmap.ACCESS_WRITE)
f.close()
# Try writing with PROT_EXEC and without PROT_WRITE
prot = mmap.PROT_READ | getattr(mmap, 'PROT_EXEC', 0)
with open(TESTFN, "r+b") as f:
m = mmap.mmap(f.fileno(), mapsize, prot=prot)
self.assertRaises(TypeError, m.write, b"abcdef")
self.assertRaises(TypeError, m.write_byte, 0)
m.close()
def test_bad_file_desc(self):
# Try opening a bad file descriptor...
self.assertRaises(mmap.error, mmap.mmap, -2, 4096)
def test_tougher_find(self):
# Do a tougher .find() test. SF bug 515943 pointed out that, in 2.2,
# searching for data with embedded \0 bytes didn't work.
f = open(TESTFN, 'w+')
data = 'aabaac\x00deef\x00\x00aa\x00'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
for start in range(n+1):
for finish in range(start, n+1):
slice = data[start : finish]
self.assertEqual(m.find(slice), data.find(slice))
self.assertEqual(m.find(slice + 'x'), -1)
m.close()
def test_find_end(self):
# test the new 'end' parameter works as expected
f = open(TESTFN, 'w+')
data = 'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.find('one'), 0)
self.assertEqual(m.find('ones'), 8)
self.assertEqual(m.find('one', 0, -1), 0)
self.assertEqual(m.find('one', 1), 8)
self.assertEqual(m.find('one', 1, -1), 8)
self.assertEqual(m.find('one', 1, -2), -1)
m.close()
def test_rfind(self):
# test that rfind() and its 'start'/'end' parameters work as expected
f = open(TESTFN, 'w+')
data = 'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.rfind('one'), 8)
self.assertEqual(m.rfind('one '), 0)
self.assertEqual(m.rfind('one', 0, -1), 8)
self.assertEqual(m.rfind('one', 0, -2), 0)
self.assertEqual(m.rfind('one', 1, -1), 8)
self.assertEqual(m.rfind('one', 1, -2), -1)
m.close()
def test_double_close(self):
# make sure a double close doesn't crash on Solaris (Bug# 665913)
f = open(TESTFN, 'w+')
f.write(2**16 * 'a') # Arbitrary character
f.close()
f = open(TESTFN)
mf = mmap.mmap(f.fileno(), 2**16, access=mmap.ACCESS_READ)
mf.close()
mf.close()
f.close()
def test_entire_file(self):
# test mapping of entire file by passing 0 for map length
if hasattr(os, "stat"):
f = open(TESTFN, "w+")
f.write(2**16 * 'm') # Arbitrary character
f.close()
f = open(TESTFN, "rb+")
mf = mmap.mmap(f.fileno(), 0)
self.assertEqual(len(mf), 2**16, "Map size should equal file size.")
self.assertEqual(mf.read(2**16), 2**16 * "m")
mf.close()
f.close()
def test_length_0_offset(self):
# Issue #10916: test mapping of remainder of file by passing 0 for
# map length with an offset doesn't cause a segfault.
if not hasattr(os, "stat"):
self.skipTest("needs os.stat")
# NOTE: allocation granularity is currently 65536 under Win64,
# and therefore the minimum offset alignment.
with open(TESTFN, "wb") as f:
f.write((65536 * 2) * b'm') # Arbitrary character
with open(TESTFN, "rb") as f:
mf = mmap.mmap(f.fileno(), 0, offset=65536, access=mmap.ACCESS_READ)
try:
self.assertRaises(IndexError, mf.__getitem__, 80000)
finally:
mf.close()
def test_length_0_large_offset(self):
# Issue #10959: test mapping of a file by passing 0 for
# map length with a large offset doesn't cause a segfault.
if not hasattr(os, "stat"):
self.skipTest("needs os.stat")
with open(TESTFN, "wb") as f:
f.write(115699 * b'm') # Arbitrary character
with open(TESTFN, "w+b") as f:
self.assertRaises(ValueError, mmap.mmap, f.fileno(), 0,
offset=2147418112)
def test_move(self):
# make sure move() works everywhere (64-bit format problem earlier)
f = open(TESTFN, 'w+')
f.write("ABCDEabcde") # Arbitrary character
f.flush()
mf = mmap.mmap(f.fileno(), 10)
mf.move(5, 0, 5)
self.assertEqual(mf[:], "ABCDEABCDE", "Map move should have duplicated front 5")
mf.close()
f.close()
# more exhaustive test
data = "0123456789"
for dest in range(len(data)):
for src in range(len(data)):
for count in range(len(data) - max(dest, src)):
expected = data[:dest] + data[src:src+count] + data[dest+count:]
m = mmap.mmap(-1, len(data))
m[:] = data
m.move(dest, src, count)
self.assertEqual(m[:], expected)
m.close()
# segfault test (Issue 5387)
m = mmap.mmap(-1, 100)
offsets = [-100, -1, 0, 1, 100]
for source, dest, size in itertools.product(offsets, offsets, offsets):
try:
m.move(source, dest, size)
except ValueError:
pass
offsets = [(-1, -1, -1), (-1, -1, 0), (-1, 0, -1), (0, -1, -1),
(-1, 0, 0), (0, -1, 0), (0, 0, -1)]
for source, dest, size in offsets:
self.assertRaises(ValueError, m.move, source, dest, size)
m.close()
m = mmap.mmap(-1, 1) # single byte
self.assertRaises(ValueError, m.move, 0, 0, 2)
self.assertRaises(ValueError, m.move, 1, 0, 1)
self.assertRaises(ValueError, m.move, 0, 1, 1)
m.move(0, 0, 1)
m.move(0, 0, 0)
def test_anonymous(self):
# anonymous mmap.mmap(-1, PAGE)
m = mmap.mmap(-1, PAGESIZE)
for x in xrange(PAGESIZE):
self.assertEqual(m[x], '\0', "anonymously mmap'ed contents should be zero")
for x in xrange(PAGESIZE):
m[x] = ch = chr(x & 255)
self.assertEqual(m[x], ch)
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = "".join(chr(c) for c in reversed(range(256)))
m = mmap.mmap(-1, len(s))
m[:] = s
self.assertEqual(m[:], s)
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(m[start:stop:step],
s[start:stop:step])
def test_extended_set_del_slice(self):
# Test extended slicing by comparing with list slicing.
s = "".join(chr(c) for c in reversed(range(256)))
m = mmap.mmap(-1, len(s))
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
m[:] = s
self.assertEqual(m[:], s)
L = list(s)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data = "".join(reversed(data))
L[start:stop:step] = data
m[start:stop:step] = data
self.assertEqual(m[:], "".join(L))
def make_mmap_file (self, f, halfsize):
# Write 2 pages worth of data to the file
f.write ('\0' * halfsize)
f.write ('foo')
f.write ('\0' * (halfsize - 3))
f.flush ()
return mmap.mmap (f.fileno(), 0)
def test_offset (self):
f = open (TESTFN, 'w+b')
try: # unlink TESTFN no matter what
halfsize = mmap.ALLOCATIONGRANULARITY
m = self.make_mmap_file (f, halfsize)
m.close ()
f.close ()
mapsize = halfsize * 2
# Try invalid offset
f = open(TESTFN, "r+b")
for offset in [-2, -1, None]:
try:
m = mmap.mmap(f.fileno(), mapsize, offset=offset)
self.fail("mmap() with offset %r should have raised an exception" % (offset,))
except (ValueError, TypeError, OverflowError):
pass
else:
self.assertEqual(0, 0)
f.close()
# Try a valid offset (one allocation granularity into the file)
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize - halfsize, offset=halfsize)
self.assertEqual(m[0:3], 'foo')
f.close()
# Try resizing map
try:
m.resize(512)
except SystemError:
pass
else:
# resize() is supported
self.assertEqual(len(m), 512)
# Check that we can no longer seek beyond the new size.
self.assertRaises(ValueError, m.seek, 513, 0)
# Check that the content is not changed
self.assertEqual(m[0:3], 'foo')
# Check that the underlying file is truncated too
f = open(TESTFN)
f.seek(0, 2)
self.assertEqual(f.tell(), halfsize + 512)
f.close()
self.assertEqual(m.size(), halfsize + 512)
m.close()
finally:
f.close()
try:
os.unlink(TESTFN)
except OSError:
pass
def test_subclass(self):
class anon_mmap(mmap.mmap):
def __new__(klass, *args, **kwargs):
return mmap.mmap.__new__(klass, -1, *args, **kwargs)
anon_mmap(PAGESIZE)
def test_prot_readonly(self):
if not hasattr(mmap, 'PROT_READ'):
return
mapsize = 10
with open(TESTFN, "wb") as f:
f.write("a"*mapsize)
f = open(TESTFN, "rb")
m = mmap.mmap(f.fileno(), mapsize, prot=mmap.PROT_READ)
self.assertRaises(TypeError, m.write, "foo")
f.close()
def test_error(self):
self.assertTrue(issubclass(mmap.error, EnvironmentError))
self.assertIn("mmap.error", str(mmap.error))
def test_io_methods(self):
data = "0123456789"
with open(TESTFN, "wb") as f:
f.write("x"*len(data))
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), len(data))
f.close()
# Test write_byte()
for i in xrange(len(data)):
self.assertEqual(m.tell(), i)
m.write_byte(data[i])
self.assertEqual(m.tell(), i+1)
self.assertRaises(ValueError, m.write_byte, "x")
self.assertEqual(m[:], data)
# Test read_byte()
m.seek(0)
for i in xrange(len(data)):
self.assertEqual(m.tell(), i)
self.assertEqual(m.read_byte(), data[i])
self.assertEqual(m.tell(), i+1)
self.assertRaises(ValueError, m.read_byte)
# Test read()
m.seek(3)
self.assertEqual(m.read(3), "345")
self.assertEqual(m.tell(), 6)
# Test write()
m.seek(3)
m.write("bar")
self.assertEqual(m.tell(), 6)
self.assertEqual(m[:], "012bar6789")
m.seek(8)
self.assertRaises(ValueError, m.write, "bar")
m.close()
if os.name == 'nt':
def test_tagname(self):
data1 = "0123456789"
data2 = "abcdefghij"
assert len(data1) == len(data2)
# Test same tag
m1 = mmap.mmap(-1, len(data1), tagname="foo")
m1[:] = data1
m2 = mmap.mmap(-1, len(data2), tagname="foo")
m2[:] = data2
self.assertEqual(m1[:], data2)
self.assertEqual(m2[:], data2)
m2.close()
m1.close()
# Test different tag
m1 = mmap.mmap(-1, len(data1), tagname="foo")
m1[:] = data1
m2 = mmap.mmap(-1, len(data2), tagname="boo")
m2[:] = data2
self.assertEqual(m1[:], data1)
self.assertEqual(m2[:], data2)
m2.close()
m1.close()
def test_crasher_on_windows(self):
# Should not crash (Issue 1733986)
m = mmap.mmap(-1, 1000, tagname="foo")
try:
mmap.mmap(-1, 5000, tagname="foo")[:] # same tagname, but larger size
except:
pass
m.close()
# Should not crash (Issue 5385)
with open(TESTFN, "wb") as f:
f.write("x"*10)
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), 0)
f.close()
try:
m.resize(0) # will raise WindowsError
except:
pass
try:
m[:]
except:
pass
m.close()
def test_invalid_descriptor(self):
# socket file descriptors are valid, but out of range
# for _get_osfhandle, causing a crash when validating the
# parameters to _get_osfhandle.
s = socket.socket()
try:
with self.assertRaises(mmap.error):
m = mmap.mmap(s.fileno(), 10)
finally:
s.close()
class LargeMmapTests(unittest.TestCase):
def setUp(self):
unlink(TESTFN)
def tearDown(self):
unlink(TESTFN)
def _make_test_file(self, num_zeroes, tail):
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
requires('largefile',
'test requires %s bytes and a long time to run' % str(0x180000000))
f = open(TESTFN, 'w+b')
try:
f.seek(num_zeroes)
f.write(tail)
f.flush()
except (IOError, OverflowError):
f.close()
raise unittest.SkipTest("filesystem does not have largefile support")
return f
def test_large_offset(self):
with self._make_test_file(0x14FFFFFFF, b" ") as f:
m = mmap.mmap(f.fileno(), 0, offset=0x140000000, access=mmap.ACCESS_READ)
try:
self.assertEqual(m[0xFFFFFFF], b" ")
finally:
m.close()
def test_large_filesize(self):
with self._make_test_file(0x17FFFFFFF, b" ") as f:
m = mmap.mmap(f.fileno(), 0x10000, access=mmap.ACCESS_READ)
try:
self.assertEqual(m.size(), 0x180000000)
finally:
m.close()
# Issue 11277: mmap() with large (~4GB) sparse files crashes on OS X.
def _test_around_boundary(self, boundary):
tail = b' DEARdear '
start = boundary - len(tail) // 2
end = start + len(tail)
with self._make_test_file(start, tail) as f:
m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
try:
self.assertEqual(m[start:end], tail)
finally:
m.close()
@unittest.skipUnless(sys.maxsize > _4G, "test cannot run on 32-bit systems")
def test_around_2GB(self):
self._test_around_boundary(_2G)
@unittest.skipUnless(sys.maxsize > _4G, "test cannot run on 32-bit systems")
def test_around_4GB(self):
self._test_around_boundary(_4G)
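# Illustrative sketch (not part of the test suite and not run by test_main):
# the anonymous-map read/write/seek round trip that test_anonymous and
# test_io_methods exercise above.
def _example_anonymous_roundtrip():
    m = mmap.mmap(-1, PAGESIZE)   # anonymous, zero-filled mapping
    m.write("hello")              # file-like position advances to 5
    m.seek(0)
    data = m.read(5)              # -> "hello"
    m.close()
    return data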
def test_main():
run_unittest(MmapTests, LargeMmapTests)
if __name__ == '__main__':
test_main()
|
|
#-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
filename: lab17_runTFLenet5_mnist.py
description: simple end-to-end LeNet5 implementation
- written for the EverybodyTensorFlow tutorial
- trains on the MNIST data set from Yann LeCun's website
- the benchmark test error rate is 0.95%, as reported by LeCun et al. (1998)
- references:
- https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py
- https://github.com/sujaybabruwad/LeNet-in-Tensorflow/blob/master/LeNet-Lab.ipynb
author: Jaewook Kang
date : 2018 Feb.
'''
# __future__ imports keep import, division and print behaviour consistent
# between Python 2 and 3.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
from datetime import datetime
from os import getcwd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
sys.path.insert(0, getcwd()+'/tf_my_modules/cnn')
from tfmodel_lenet5 import Lenet5
from mnist_data_loader import DataFilename
from mnist_data_loader import MnistLoader
# configure training parameters =====================================
class TrainConfig(object):
def __init__(self):
self.learning_rate = 0.01
self.is_learning_rate_decay = True
self.learning_rate_decay_rate =0.99
self.opt_type='Adam'
self.training_epochs = 100
self.minibatch_size = 1000
        # the number of epochs between evaluations
self.display_step = 5
self.total_batch = int(TRAININGSET_SIZE / self.minibatch_size)
# batch norm config
self.batch_norm_epsilon = 1E-5
self.batch_norm_decay = 0.99
self.FLAGS = None
# FC layer config
self.dropout_keeprate = 0.8
self.fc_layer_l2loss_epsilon = 5E-5
self.tf_data_type = tf.float32
# tensorboard config
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
self.root_logdir = getcwd() + '/export/lenet5/'
self.ckptdir = self.root_logdir + '/pb_and_ckpt/'
self.tflogdir = "{}/run-{}/".format(self.root_logdir+'/tf_logs', now)
# data size config (reduced subsets for a quick run; the commented-out values are the full MNIST splits)
# TRAININGSET_SIZE = 50000
# VALIDATIONSET_SIZE = 10000
# TESTSET_SIZE = 10000
TRAININGSET_SIZE = 5000
VALIDATIONSET_SIZE = 1000
TESTSET_SIZE = 1000
# worker instance declaration
datafilename_worker = DataFilename()
mnist_data_loader = MnistLoader()
trainconfig_worker = TrainConfig()
# Download the data
train_data_filepathname = mnist_data_loader.download_mnist_dataset(filename=datafilename_worker.trainingimages_filename)
train_labels_filepathname = mnist_data_loader.download_mnist_dataset(filename=datafilename_worker.traininglabels_filename)
test_data_filepathname = mnist_data_loader.download_mnist_dataset(filename=datafilename_worker.testimages_filename)
test_labels_filepathname = mnist_data_loader.download_mnist_dataset(filename=datafilename_worker.testlabels_filename)
# extract data from gzip files into numpy arrays
train_data = mnist_data_loader.extract_data(filename=train_data_filepathname,
num_images=TRAININGSET_SIZE + VALIDATIONSET_SIZE)
train_labels = mnist_data_loader.extract_label(filename=train_labels_filepathname,
num_images=TRAININGSET_SIZE + VALIDATIONSET_SIZE)
test_data = mnist_data_loader.extract_data(filename=test_data_filepathname,
num_images=TESTSET_SIZE)
test_labels = mnist_data_loader.extract_label(filename=test_labels_filepathname,
num_images=TESTSET_SIZE)
# prepare the validation set by splitting the training set
validation_data = train_data[:VALIDATIONSET_SIZE, ...]
validation_labels = train_labels[:VALIDATIONSET_SIZE]
train_data = train_data[VALIDATIONSET_SIZE:, ...]
train_labels = train_labels[VALIDATIONSET_SIZE:]
# [data set should be zipped here]
# network model construction ======================
# TF computational graph construction
lenet5_tf_graph = tf.Graph()
with lenet5_tf_graph.as_default():
# training nodes (data,label) placeholders
lenet5_model_in = tf.placeholder(dtype=trainconfig_worker.tf_data_type,
shape=[None, mnist_data_loader.IMAGE_SIZE,
mnist_data_loader.IMAGE_SIZE,
mnist_data_loader.NUM_CHANNELS])
lenet5_label = tf.placeholder(dtype=tf.int64,
shape=[None, ])
dropout_keeprate_node = tf.placeholder(dtype=trainconfig_worker.tf_data_type)
lenet5_model_builder = Lenet5(dropout_keeprate_for_fc=dropout_keeprate_node,
dtype=trainconfig_worker.tf_data_type,
save_ckpt_path=trainconfig_worker.ckptdir)
lenet5_model_out = lenet5_model_builder.get_tf_model(input_nodes=lenet5_model_in)
with tf.name_scope("cost_func"):
lenet5_cost_op = lenet5_model_builder.get_tf_cost_fuction(train_labels_node = lenet5_label,
is_l2_loss=True,
epsilon=trainconfig_worker.fc_layer_l2loss_epsilon)
with tf.name_scope('optimizer'):
lenet5_opt_op = lenet5_model_builder.get_tf_optimizer(opt_type=trainconfig_worker.opt_type,
learning_rate=trainconfig_worker.learning_rate,
total_batch_size=TRAININGSET_SIZE,
minibatch_size=trainconfig_worker.minibatch_size,
is_exp_decay=trainconfig_worker.is_learning_rate_decay,
decay_rate=trainconfig_worker.learning_rate_decay_rate)
with tf.name_scope('model_out'):
model_pred = tf.nn.softmax(lenet5_model_out)
with tf.name_scope('eval_performance'):
        correct_prediction = tf.equal(tf.argmax(model_pred, 1), lenet5_label)
        tf_pred_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
## file writing for Tensorboard
file_writer = tf.summary.FileWriter(logdir=trainconfig_worker.tflogdir)
file_writer.add_graph(lenet5_tf_graph)
## Summary for Tensorboard visualization
tb_summary_accuracy = tf.summary.scalar('accuracy', tf_pred_accuracy)
tb_summary_cost = tf.summary.scalar('loss', lenet5_cost_op)
# network model training ==============================
train_error_rate = np.zeros(shape=np.ceil(trainconfig_worker.training_epochs/trainconfig_worker.display_step).astype(np.int16),
dtype=np.float32)
validation_error_rate = np.zeros(shape=np.ceil(trainconfig_worker.training_epochs/trainconfig_worker.display_step).astype(np.int16),
dtype=np.float32)
test_error_rate = np.zeros(shape=np.ceil(trainconfig_worker.training_epochs/trainconfig_worker.display_step).astype(np.int16),
dtype=np.float32)
with tf.Session(graph=lenet5_tf_graph) as sess:
# Run the variable initializer
sess.run(init)
print("-------------------------------------------")
rate_record_index = 0
for epoch in range(trainconfig_worker.training_epochs):
avg_cost = 0.
avg_minibatch_error_rate = 0.
start_time = time.time()
# [data shuffling here]
for i in range(trainconfig_worker.total_batch):
data_start_index = i * trainconfig_worker.minibatch_size
data_end_index = (i + 1) * trainconfig_worker.minibatch_size
minibatch_data = train_data [data_start_index:data_end_index, ...]
minibatch_label = train_labels[data_start_index:data_end_index]
_, minibatch_cost = sess.run([lenet5_opt_op,lenet5_cost_op],
feed_dict={lenet5_model_in: minibatch_data,
lenet5_label: minibatch_label,
dropout_keeprate_node: trainconfig_worker.dropout_keeprate})
# compute average cost and error rate
avg_cost += minibatch_cost
avg_cost = avg_cost / trainconfig_worker.total_batch
if trainconfig_worker.display_step == 0:
continue
elif (epoch + 1) % trainconfig_worker.display_step == 0:
elapsed_time = time.time() - start_time
train_error_rate[rate_record_index] = (1.0 - tf_pred_accuracy.eval(feed_dict={lenet5_model_in: train_data,
lenet5_label: train_labels,
dropout_keeprate_node: 1.0})) *100.0
validation_error_rate[rate_record_index] = (1.0 - tf_pred_accuracy.eval(feed_dict={lenet5_model_in: validation_data,
lenet5_label: validation_labels,
dropout_keeprate_node: 1.0})) * 100.0
test_error_rate[rate_record_index] = (1.0 - tf_pred_accuracy.eval(feed_dict={lenet5_model_in: test_data,
lenet5_label: test_labels,
dropout_keeprate_node: 1.0})) * 100.0
# tb_summary_cost_result, tb_summary_accuracy_result = sess.run([tb_summary_cost,tb_summary_accuracy],
# feed_dict={lenet5_model_in: train_data,
# lenet5_label: train_labels,
# dropout_keeprate_node:1.0})
# file_writer.add_summary(summary_str,step)
            print('At epoch = %d, elapsed_time = %.1f sec' % (epoch, elapsed_time))
print("Training set avg cost (avg over minibatches)=%.2f" % avg_cost)
print("Training set Err rate (avg over minibatches)= %.2f %% " % (train_error_rate[rate_record_index]))
print("Validation set Err rate (total batch)= %.2f %%" % (validation_error_rate[rate_record_index]))
print("Test Set Err. rate (total batch) = %.2f %%" % (test_error_rate[rate_record_index]) )
print("--------------------------------------------")
rate_record_index += 1
print("Training finished!")
#file_writer.close()
# Training result visualization ===============================================
hfig1 = plt.figure(1, figsize=(10, 10))
err_rate_index = np.array([elem for elem in range(train_error_rate.shape[0])])
plt.plot(err_rate_index, train_error_rate, label='Training err', color='r', marker='o')
plt.plot(err_rate_index, validation_error_rate, label='Validation err', color='b', marker='x')
plt.plot(err_rate_index, test_error_rate, label='Test err', color='g', marker='d')
plt.legend()
plt.title('Train/Valid/Test Error rate')
plt.xlabel('Iteration epoch')
plt.ylabel('Error rate')
plt.show()
|
|
import pytest
from plenum.common.signer_did import DidSigner
from stp_core.crypto.util import randomSeed
from plenum.common.constants import NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT, \
ALIAS, SERVICES, VALIDATOR
from plenum.common.signer_simple import SimpleSigner
from plenum.common.util import cryptonymToHex, randomString
from sovrin_client.test.cli.conftest import newStewardCli as getNewStewardCli, \
newStewardVals as getNewStewardVals, newNodeVals as getNewNodeVals
from sovrin_client.test.cli.constants import CONNECTED_TO_TEST, \
NODE_REQUEST_COMPLETED, NODE_REQUEST_FAILED, INVALID_SYNTAX
from sovrin_client.test.cli.helper import addAgent
NYM_ADDED = "Nym {remote} added"
@pytest.yield_fixture(scope="function")
def cliWithRandomName(CliBuilder):
yield from CliBuilder(randomString(6))
@pytest.fixture(scope="function")
def newStewardVals():
return getNewStewardVals()
@pytest.fixture(scope="function")
def newNodeVals():
return getNewNodeVals()
@pytest.fixture(scope="function")
def newStewardCli(be, do, poolNodesStarted, trusteeCli,
cliWithRandomName, newStewardVals):
return getNewStewardCli(be, do, poolNodesStarted, trusteeCli,
CONNECTED_TO_TEST, cliWithRandomName,
newStewardVals)
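# Every test below ends with this sanity check: the pool is considered operable if a NYM
# for a freshly generated random DID can still be added successfully.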
def ensurePoolIsOperable(be, do, cli):
randomNymMapper = {
'remote': DidSigner(seed=randomSeed()).identifier
}
addAgent(be, do, cli, randomNymMapper)
def testSendNodeSucceedsIfServicesIsArrayWithValidatorValueOnly(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][SERVICES] = [VALIDATOR]
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_COMPLETED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeSucceedsIfServicesIsEmptyArray(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][SERVICES] = []
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_COMPLETED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip(reason='SOV-1092')
def testSendNodeFailsIfDestIsSmallDecimalNumber(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeIdr'] = 42
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip(reason='SOV-1092')
def testSendNodeFailsIfDestIsShortReadableName(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeIdr'] = 'TheNewNode'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfDestIsHexKey(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeIdr'] = cryptonymToHex(
newNodeVals['newNodeIdr']).decode()
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip(reason='SOV-1096')
def testSendNodeHasInvalidSyntaxIfDestIsEmpty(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeIdr'] = ''
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=INVALID_SYNTAX, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip(reason='SOV-1096')
def testSendNodeHasInvalidSyntaxIfDestIsMissed(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
be(newStewardCli)
do('send NODE data={newNodeData}',
mapper=newNodeVals, expect=INVALID_SYNTAX, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodeIpContainsLeadingSpace(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_IP] = ' 122.62.52.13'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodeIpContainsTrailingSpace(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_IP] = '122.62.52.13 '
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodeIpHasWrongFormat(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_IP] = '122.62.52'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfSomeNodeIpComponentsAreNegative(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_IP] = '122.-1.52.13'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfSomeNodeIpComponentsAreHigherThanUpperBound(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_IP] = '122.62.256.13'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodeIpIsEmpty(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_IP] = ''
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodeIpIsMissed(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
del newNodeVals['newNodeData'][NODE_IP]
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodePortIsNegative(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_PORT] = -1
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodePortIsHigherThanUpperBound(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_PORT] = 65536
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodePortIsFloat(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_PORT] = 5555.5
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodePortHasWrongFormat(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_PORT] = 'ninety'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodePortIsEmpty(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][NODE_PORT] = ''
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfNodePortIsMissed(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
del newNodeVals['newNodeData'][NODE_PORT]
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientIpContainsLeadingSpace(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_IP] = ' 122.62.52.13'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientIpContainsTrailingSpace(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_IP] = '122.62.52.13 '
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientIpHasWrongFormat(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_IP] = '122.62.52'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfSomeClientIpComponentsAreNegative(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_IP] = '122.-1.52.13'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfSomeClientIpComponentsAreHigherThanUpperBound(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_IP] = '122.62.256.13'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientIpIsEmpty(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_IP] = ''
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientIpIsMissed(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
del newNodeVals['newNodeData'][CLIENT_IP]
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientPortIsNegative(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_PORT] = -1
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientPortIsHigherThanUpperBound(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_PORT] = 65536
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientPortIsFloat(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_PORT] = 5555.5
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientPortHasWrongFormat(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_PORT] = 'ninety'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientPortIsEmpty(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][CLIENT_PORT] = ''
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfClientPortIsMissed(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
del newNodeVals['newNodeData'][CLIENT_PORT]
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfAliasIsEmpty(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][ALIAS] = ''
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfAliasIsMissed(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
del newNodeVals['newNodeData'][ALIAS]
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfServicesContainsUnknownValue(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][SERVICES] = [VALIDATOR, 'DECIDER']
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfServicesIsValidatorValue(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][SERVICES] = VALIDATOR # just string, not array
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfServicesIsEmptyString(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'][SERVICES] = ''
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfDataContainsUnknownField(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData']['extra'] = 42
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeFailsIfDataIsEmptyJson(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'] = {}
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip(reason='INDY-68')
def testSendNodeFailsIfDataIsBrokenJson(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'] = "{'node_ip': '10.0.0.105', 'node_port': 9701"
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip(reason='INDY-68')
def testSendNodeFailsIfDataIsNotJson(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'] = 'not_json'
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_FAILED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip(reason='SOV-1096')
def testSendNodeHasInvalidSyntaxIfDataIsEmptyString(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
newNodeVals['newNodeData'] = ''
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=INVALID_SYNTAX, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip(reason='SOV-1096')
def testSendNodeHasInvalidSyntaxIfDataIsMissed(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
be(newStewardCli)
do('send NODE dest={newNodeIdr}',
mapper=newNodeVals, expect=INVALID_SYNTAX, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip(reason='SOV-1096')
def testSendNodeHasInvalidSyntaxIfUnknownParameterIsPassed(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData} extra=42',
mapper=newNodeVals, expect=INVALID_SYNTAX, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
def testSendNodeHasInvalidSyntaxIfAllParametersAreMissed(
be, do, poolNodesStarted, newStewardCli):
be(newStewardCli)
do('send NODE', expect=INVALID_SYNTAX, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
@pytest.mark.skip('INDY-88')
def testSendNodeSucceedsIfServicesIsMissed(
be, do, poolNodesStarted, newStewardCli, newNodeVals):
del newNodeVals['newNodeData'][SERVICES]
be(newStewardCli)
do('send NODE dest={newNodeIdr} data={newNodeData}',
mapper=newNodeVals, expect=NODE_REQUEST_COMPLETED, within=8)
ensurePoolIsOperable(be, do, newStewardCli)
|
|
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import six
from tensorflow import keras
from keras_tuner.protos import keras_tuner_pb2
class MetricObservation(object):
"""Metric value at a given step of training across multiple executions.
If the model is trained multiple
times (multiple executions), KerasTuner records the value of each
metric at each training step. These values are aggregated
over multiple executions into a list where each value corresponds
to one execution.
Args:
value: Float or a list of floats. The evaluated metric values.
step: Int. The step of the evaluation, for example, the epoch number.
"""
def __init__(self, value, step):
if not isinstance(value, list):
value = [value]
self.value = value
self.step = step
def append(self, value):
if not isinstance(value, list):
value = [value]
self.value += value
def mean(self):
return np.mean(self.value)
def get_config(self):
return {"value": self.value, "step": self.step}
@classmethod
def from_config(cls, config):
return cls(**config)
def __eq__(self, other):
if not isinstance(other, MetricObservation):
return False
return other.value == self.value and other.step == self.step
def __repr__(self):
return "MetricObservation(value={}, step={})".format(self.value, self.step)
def to_proto(self):
return keras_tuner_pb2.MetricObservation(value=self.value, step=self.step)
@classmethod
def from_proto(cls, proto):
return cls(value=list(proto.value), step=proto.step)
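# Illustrative sketch (not part of the original module): one observation aggregates the
# values recorded at the same step across multiple executions.
def _example_metric_observation():
    obs = MetricObservation(0.5, step=1)
    obs.append(0.7)                # a second execution evaluated at the same step
    assert obs.value == [0.5, 0.7]
    return obs.mean()              # per-step average over executions (0.6)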
class MetricHistory(object):
"""Record of multiple executions of a single metric.
It contains a collection of `MetricObservation` instances.
Args:
direction: String. The direction of the metric to optimize. The value
should be "min" or "max".
"""
def __init__(self, direction="min"):
if direction not in {"min", "max"}:
raise ValueError(
"`direction` should be one of "
'{"min", "max"}, but got: %s' % (direction,)
)
self.direction = direction
# Mapping step to `MetricObservation`.
self._observations = {}
def update(self, value, step):
if step in self._observations:
self._observations[step].append(value)
else:
self._observations[step] = MetricObservation(value, step=step)
def get_best_value(self):
values = list(obs.mean() for obs in self._observations.values())
if not values:
return None
if self.direction == "min":
return np.nanmin(values)
return np.nanmax(values)
def get_best_step(self):
best_value = self.get_best_value()
if best_value is None:
return None
for obs in self._observations.values():
if obs.mean() == best_value:
return obs.step
def get_history(self):
return sorted(self._observations.values(), key=lambda obs: obs.step)
def set_history(self, observations):
for obs in observations:
self.update(obs.value, step=obs.step)
def get_statistics(self):
history = self.get_history()
history_values = [obs.mean() for obs in history]
if not len(history_values):
return {}
return {
"min": float(np.nanmin(history_values)),
"max": float(np.nanmax(history_values)),
"mean": float(np.nanmean(history_values)),
"median": float(np.nanmedian(history_values)),
"var": float(np.nanvar(history_values)),
"std": float(np.nanstd(history_values)),
}
def get_last_value(self):
history = self.get_history()
if history:
last_obs = history[-1]
return last_obs.mean()
else:
return None
def get_config(self):
config = {}
config["direction"] = self.direction
config["observations"] = [obs.get_config() for obs in self.get_history()]
return config
@classmethod
def from_config(cls, config):
instance = cls(config["direction"])
instance.set_history(
[MetricObservation.from_config(obs) for obs in config["observations"]]
)
return instance
def to_proto(self):
return keras_tuner_pb2.MetricHistory(
observations=[obs.to_proto() for obs in self.get_history()],
maximize=self.direction == "max",
)
@classmethod
def from_proto(cls, proto):
direction = "max" if proto.maximize else "min"
instance = cls(direction)
instance.set_history(
[MetricObservation.from_proto(p) for p in proto.observations]
)
return instance
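# Illustrative sketch (not part of the original module): a MetricHistory keeps one
# MetricObservation per step and reports the best per-step mean.
def _example_metric_history():
    history = MetricHistory(direction="min")
    history.update(1.0, step=0)
    history.update(0.4, step=1)
    history.update(0.6, step=1)    # second execution of step 1 -> per-step mean 0.5
    return history.get_best_value(), history.get_best_step()  # (0.5, 1)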
class MetricsTracker(object):
"""Record of the values of multiple executions of all metrics.
It contains `MetricHistory` instances for the metrics.
Args:
metrics: List of strings of the names of the metrics.
"""
def __init__(self, metrics=None):
# str -> MetricHistory
self.metrics = {}
self.register_metrics(metrics)
def exists(self, name):
return name in self.metrics
def register_metrics(self, metrics=None):
metrics = metrics or []
for metric in metrics:
self.register(metric.name)
def register(self, name, direction=None):
if self.exists(name):
raise ValueError("Metric already exists: %s" % (name,))
if direction is None:
direction = infer_metric_direction(name)
if direction is None:
# Objective direction is handled separately, but
# non-objective direction defaults to min.
direction = "min"
self.metrics[name] = MetricHistory(direction)
def update(self, name, value, step=0):
value = float(value)
if not self.exists(name):
self.register(name)
prev_best = self.metrics[name].get_best_value()
self.metrics[name].update(value, step=step)
new_best = self.metrics[name].get_best_value()
improved = new_best != prev_best
return improved
def get_history(self, name):
self._assert_exists(name)
return self.metrics[name].get_history()
def set_history(self, name, observations):
assert type(observations) == list
if not self.exists(name):
self.register(name)
self.metrics[name].set_history(observations)
def get_best_value(self, name):
self._assert_exists(name)
return self.metrics[name].get_best_value()
def get_best_step(self, name):
self._assert_exists(name)
return self.metrics[name].get_best_step()
def get_statistics(self, name):
self._assert_exists(name)
return self.metrics[name].get_statistics()
def get_last_value(self, name):
self._assert_exists(name)
return self.metrics[name].get_last_value()
def get_direction(self, name):
self._assert_exists(name)
return self.metrics[name].direction
def get_config(self):
return {
"metrics": {
name: metric_history.get_config()
for name, metric_history in self.metrics.items()
}
}
@classmethod
def from_config(cls, config):
instance = cls()
instance.metrics = {
name: MetricHistory.from_config(metric_history)
for name, metric_history in config["metrics"].items()
}
return instance
def to_proto(self):
return keras_tuner_pb2.MetricsTracker(
metrics={
name: metric_history.to_proto()
for name, metric_history in self.metrics.items()
}
)
@classmethod
def from_proto(cls, proto):
instance = cls()
instance.metrics = {
name: MetricHistory.from_proto(metric_history)
for name, metric_history in proto.metrics.items()
}
return instance
def _assert_exists(self, name):
if name not in self.metrics:
raise ValueError("Unknown metric: %s" % (name,))
_MAX_METRICS = (
"Accuracy",
"BinaryAccuracy",
"CategoricalAccuracy",
"SparseCategoricalAccuracy",
"TopKCategoricalAccuracy",
"SparseTopKCategoricalAccuracy",
"TruePositives",
"TrueNegatives",
"Precision",
"Recall",
"AUC",
"SensitivityAtSpecificity",
"SpecificityAtSensitivity",
)
_MAX_METRIC_FNS = (
"accuracy",
"categorical_accuracy",
"binary_accuracy",
"sparse_categorical_accuracy",
)
def infer_metric_direction(metric):
# Handle str input and get canonical object.
if isinstance(metric, six.string_types):
metric_name = metric
if metric_name.startswith("val_"):
metric_name = metric_name.replace("val_", "", 1)
if metric_name.startswith("weighted_"):
metric_name = metric_name.replace("weighted_", "", 1)
# Special-cases (from `keras/engine/training_utils.py`)
if metric_name in {"loss", "crossentropy", "ce"}:
return "min"
elif metric_name == "acc":
return "max"
try:
metric = keras.metrics.get(metric_name)
except ValueError:
try:
metric = keras.losses.get(metric_name)
except:
# Direction can't be inferred.
return None
# Metric class, Loss class, or function.
if isinstance(metric, (keras.metrics.Metric, keras.losses.Loss)):
name = metric.__class__.__name__
if name == "MeanMetricWrapper":
name = metric._fn.__name__
else:
name = metric.__name__
if name in _MAX_METRICS or name in _MAX_METRIC_FNS:
return "max"
elif hasattr(keras.metrics, name) or hasattr(keras.losses, name):
return "min"
# Direction can't be inferred.
return None
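# Examples of the inference above ("val_"/"weighted_" prefixes are stripped first):
#   infer_metric_direction("val_acc")      -> "max"  (special-cased "acc")
#   infer_metric_direction("crossentropy") -> "min"  (special-cased loss name)
#   infer_metric_direction("not_a_metric") -> None   (direction cannot be inferred)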
|
|
# -*- coding: utf-8 -*-
from hashlib import sha256
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, IntegrityError
from django.utils.text import slugify
from django.core.exceptions import ValidationError
class Migration(SchemaMigration):
def forwards(self, orm):
from opps.feedcrawler.processors.rss import RSSProcessor
from opps.feedcrawler.models import Feed, Entry
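        # Re-slug RSS-crawled entries as "<feed slug>-<sha256 of the entry id (or link)>"
        # so repeated items get a stable, unique slug; on a slug collision the old slug is
        # kept and the entry is unpublished instead.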
feeds = Feed.objects.all()
feeds_ids = [feed.pk for feed in feeds if isinstance(feed.get_processor(), RSSProcessor)]
entries = Entry.objects.filter(entry_feed_id__in=feeds_ids).prefetch_related('entry_feed').order_by('-pk')
for item in entries:
try:
entry_json = item.load_json()
except ValidationError:
continue
old_slug = item.slug
entry_hash = sha256(entry_json.get('id') or item.entry_link)
item.slug = slugify(u'{}-{}'.format(item.entry_feed.slug, entry_hash.hexdigest()))
if old_slug != item.slug:
if Entry.objects.filter(slug=item.slug).exists():
item.slug = old_slug
item.published = False
item.save()
def backwards(self, orm):
pass
models = {
u'accounts.customuser': {
'Meta': {'object_name': 'CustomUser'},
'accept_terms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'complement': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cpf': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'football_club': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'main_image': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'rg': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'topic': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'channels.channel': {
'Meta': {'ordering': "[u'name', u'parent__id', u'published']", 'unique_together': "((u'site', u'long_slug', u'slug', u'parent'),)", 'object_name': 'Channel'},
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hat': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_in_main_rss': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'layout': ('django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '250', 'db_index': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'long_slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['images.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'mirror_site': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'channels_channel_mirror_site'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'paginate_by': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "u'subchannel'", 'null': 'True', 'to': u"orm['channels.Channel']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'show_in_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.CustomUser']"})
},
u'containers.container': {
'Meta': {'ordering': "['-date_available']", 'unique_together': "(('site', 'channel', 'slug'),)", 'object_name': 'Container'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['channels.Channel']"}),
'channel_long_slug': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'channel_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '140', 'null': 'True', 'blank': 'True'}),
'child_app_label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'child_class': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'child_module': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '120', 'null': 'True', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'hat': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['images.Image']", 'null': 'True', 'through': u"orm['containers.ContainerImage']", 'blank': 'True'}),
'json': ('opps.db.models.fields.jsonf.JSONField', [], {'null': 'True', 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'containers_container_mainimage'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['images.Image']"}),
'main_image_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mirror_channel': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'containers_container_mirror_channel'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['channels.Channel']"}),
'mirror_site': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'containers_container_mirror_site'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_containers.container_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'related_containers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'container_relatedcontainers'", 'to': u"orm['containers.Container']", 'through': u"orm['containers.ContainerRelated']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'show_on_root_channel': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.CustomUser']"})
},
u'containers.containerimage': {
'Meta': {'ordering': "('order',)", 'object_name': 'ContainerImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Container']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['images.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'containers.containerrelated': {
'Meta': {'ordering': "('order',)", 'object_name': 'ContainerRelated'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'containerrelated_container'", 'to': u"orm['containers.Container']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'related': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'containers_containerrelated_container'", 'to': u"orm['containers.Container']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feedcrawler.entry': {
'Meta': {'ordering': "['-entry_published_time']", 'object_name': 'Entry', '_ormbases': [u'containers.Container']},
u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
'entry_category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'entry_category_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'entry_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entry_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entry_feed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedcrawler.Feed']"}),
'entry_json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entry_link': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'entry_original_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entry_published_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'entry_pulled_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'entry_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'post_created': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'feedcrawler.feed': {
'Meta': {'ordering': "['title']", 'object_name': 'Feed'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['channels.Channel']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedcrawler.FeedType']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedcrawler.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.PositiveIntegerField', [], {'default': '20', 'null': 'True', 'blank': 'True'}),
'last_polled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feed_image'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['images.Image']"}),
'max_entries': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mirror_site': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'feedcrawler_feed_mirror_site'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'publish_entries': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'published_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'source_json_params': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'source_port': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'source_root_folder': ('django.db.models.fields.CharField', [], {'default': "'/'", 'max_length': '255'}),
'source_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.CustomUser']"})
},
u'feedcrawler.feedtype': {
'Meta': {'object_name': 'FeedType'},
'actions': ('django.db.models.fields.CharField', [], {'default': "'opps.feedcrawler.actions.rss.RSSActions'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'processor': ('django.db.models.fields.CharField', [], {'default': "'opps.feedcrawler.processors.rss.RSSProcessor'", 'max_length': '255'})
},
u'feedcrawler.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'})
},
u'feedcrawler.processlog': {
'Meta': {'object_name': 'ProcessLog'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedcrawler.Feed']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'images.image': {
'Meta': {'object_name': 'Image'},
'archive': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'archive_link': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'crop_example': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'crop_x1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_x2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_y1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_y2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fit_in': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'halign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mirror_site': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'images_image_mirror_site'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'smart': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.CustomUser']"}),
'valign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['feedcrawler']
|
|
import math
import torch
####################################
# Problem Class A. Single equations.
####################################
def A1():
diffeq = lambda t, y: -y
init = lambda: (torch.tensor(0.), torch.tensor(1.))
solution = lambda t: torch.exp(-t)
return diffeq, init, solution
def A2():
diffeq = lambda t, y: -y**3 / 2
init = lambda: (torch.tensor(0.), torch.tensor(1.))
solution = lambda t: 1 / torch.sqrt(t + 1)
return diffeq, init, solution
def A3():
diffeq = lambda t, y: y * torch.cos(t)
init = lambda: (torch.tensor(0.), torch.tensor(1.))
solution = lambda t: torch.exp(torch.sin(t))
return diffeq, init, solution
def A4():
diffeq = lambda t, y: y / 4 * (1 - y / 20)
init = lambda: (torch.tensor(0.), torch.tensor(1.))
solution = lambda t: 20 / (1 + 19 * torch.exp(-t / 4))
return diffeq, init, solution
def A5():
diffeq = lambda t, y: (y - t) / (y + t)
init = lambda: (torch.tensor(0.), torch.tensor(4.))
return diffeq, init, None
#################################
# Problem Class B. Small systems.
#################################
def B1():
def diffeq(t, y):
dy0 = 2 * (y[0] - y[0] * y[1])
dy1 = -(y[1] - y[0] * y[1])
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([1., 3.])
return diffeq, init, None
def B2():
A = torch.tensor([[-1., 1., 0.], [1., -2., 1.], [0., 1., -1.]])
def diffeq(t, y):
dy = torch.mv(A, y)
return dy
def init():
return torch.tensor(0.), torch.tensor([2., 0., 1.])
return diffeq, init, None
def B3():
def diffeq(t, y):
dy0 = -y[0]
dy1 = y[0] - y[1] * y[1]
dy2 = y[1] * y[1]
return torch.stack([dy0, dy1, dy2])
def init():
return torch.tensor(0.), torch.tensor([1., 0., 0.])
return diffeq, init, None
def B4():
def diffeq(t, y):
a = torch.sqrt(y[0] * y[0] + y[1] * y[1])
dy0 = -y[1] - y[0] * y[2] / a
dy1 = y[0] - y[1] * y[2] / a
dy2 = y[0] / a
return torch.stack([dy0, dy1, dy2])
def init():
return torch.tensor(0.), torch.tensor([3., 0., 0.])
return diffeq, init, None
def B5():
def diffeq(t, y):
dy0 = y[1] * y[2]
dy1 = -y[0] * y[2]
dy2 = -0.51 * y[0] * y[1]
return torch.stack([dy0, dy1, dy2])
def init():
return torch.tensor(0.), torch.tensor([0., 1., 1.])
return diffeq, init, None
####################################
# Problem Class C. Moderate systems.
####################################
def C1():
A = torch.zeros(10, 10)
A.view(-1)[:-1:11] = -1
A.view(-1)[10::11] = 1
def diffeq(t, y):
return torch.mv(A, y)
def init():
y0 = torch.zeros(10)
y0[0] = 1
return torch.tensor(0.), y0
return diffeq, init, None
def C2():
A = torch.zeros(10, 10)
A.view(-1)[:-1:11] = torch.linspace(-1, -9, 9)
A.view(-1)[10::11] = torch.linspace(1, 9, 9)
def diffeq(t, y):
return torch.mv(A, y)
def init():
y0 = torch.zeros(10)
y0[0] = 1
return torch.tensor(0.), y0
return diffeq, init, None
def C3():
n = 10
A = torch.zeros(n, n)
A.view(-1)[::n + 1] = -2
A.view(-1)[n::n + 1] = 1
A.view(-1)[1::n + 1] = 1
def diffeq(t, y):
return torch.mv(A, y)
def init():
y0 = torch.zeros(n)
y0[0] = 1
return torch.tensor(0.), y0
return diffeq, init, None
def C4():
n = 51
A = torch.zeros(n, n)
A.view(-1)[::n + 1] = -2
A.view(-1)[n::n + 1] = 1
A.view(-1)[1::n + 1] = 1
def diffeq(t, y):
return torch.mv(A, y)
def init():
y0 = torch.zeros(n)
y0[0] = 1
return torch.tensor(0.), y0
return diffeq, init, None
def C5():
k2 = torch.tensor(2.95912208286)
m0 = torch.tensor(1.00000597682)
m = torch.tensor([
0.000954786104043,
0.000285583733151,
0.0000437273164546,
0.0000517759138449,
0.00000277777777778,
]).view(1, 5)
def diffeq(t, y):
# y is 2 x 3 x 5
        # y[0] contains y, y[1] contains y'
# second axis indexes space (x,y,z).
# third axis indexes 5 bodies.
dy = y[1, :, :]
y = y[0]
r = torch.sqrt(torch.sum(y**2, 0)).view(1, 5)
d = torch.sqrt(torch.sum((y[:, :, None] - y[:, None, :])**2, 0))
F = m.view(1, 1, 5) * ((y[:, None, :] - y[:, :, None]) / (d * d * d).view(1, 5, 5) + y.view(3, 1, 5) /
(r * r * r).view(1, 1, 5))
F.view(3, 5 * 5)[:, ::6] = 0
ddy = k2 * (-(m0 + m) * y / (r * r * r)) + F.sum(2)
return torch.stack([dy, ddy], 0)
def init():
y0 = torch.tensor([
3.42947415189, 3.35386959711, 1.35494901715, 6.64145542550, 5.97156957878, 2.18231499728, 11.2630437207,
            14.6952576794, 6.27960525067, -30.1552268759, 1.65699966404, 1.43785752721, -21.1238353380, 28.4465098142,
15.388265967
]).view(5, 3).transpose(0, 1)
dy0 = torch.tensor([
-.557160570446, .505696783289, .230578543901, -.415570776342, .365682722812, .169143213293, -.325325669158,
.189706021964, .0877265322780, -.0240476254170, -.287659532608, -.117219543175, -.176860753121,
-.216393453025, -.0148647893090
]).view(5, 3).transpose(0, 1)
return torch.tensor(0.), torch.stack([y0, dy0], 0)
return diffeq, init, None
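# A quick, optional sanity check for the 2 x 3 x 5 state layout documented in
# the C5 comments above. This helper is illustrative only (its name is made up
# and it is not part of the original problem set); call it manually if needed.
def _check_C5_shapes():
    diffeq, init, _ = C5()
    t0, y0 = init()
    assert y0.shape == (2, 3, 5)              # positions and velocities of 5 bodies in 3D
    assert diffeq(t0, y0).shape == (2, 3, 5)  # the derivative keeps the same layout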
###################################
# Problem Class D. Orbit equations.
###################################
def _DTemplate(eps):
def diffeq(t, y):
r = (y[0]**2 + y[1]**2)**(3 / 2)
dy0 = y[2]
dy1 = y[3]
dy2 = -y[0] / r
dy3 = -y[1] / r
return torch.stack([dy0, dy1, dy2, dy3])
def init():
return torch.tensor(0.), torch.tensor([1 - eps, 0, 0, math.sqrt((1 + eps) / (1 - eps))])
return diffeq, init, None
D1 = lambda: _DTemplate(0.1)
D2 = lambda: _DTemplate(0.3)
D3 = lambda: _DTemplate(0.5)
D4 = lambda: _DTemplate(0.7)
D5 = lambda: _DTemplate(0.9)
##########################################
# Problem Class E. Higher order equations.
##########################################
def E1():
def diffeq(t, y):
dy0 = y[1]
dy1 = -(y[1] / (t + 1) + (1 - 0.25 / (t + 1)**2) * y[0])
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([.671396707141803, .0954005144474744])
return diffeq, init, None
def E2():
def diffeq(t, y):
dy0 = y[1]
dy1 = (1 - y[0]**2) * y[1] - y[0]
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([2., 0.])
return diffeq, init, None
def E3():
def diffeq(t, y):
dy0 = y[1]
dy1 = y[0]**3 / 6 - y[0] + 2 * torch.sin(2.78535 * t)
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([0., 0.])
return diffeq, init, None
def E4():
def diffeq(t, y):
dy0 = y[1]
dy1 = .32 - .4 * y[1]**2
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([30., 0.])
return diffeq, init, None
def E5():
def diffeq(t, y):
dy0 = y[1]
dy1 = torch.sqrt(1 + y[1]**2) / (25 - t)
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([0., 0.])
return diffeq, init, None
###################
# Helper functions.
###################
def _to_tensor(x):
if not torch.is_tensor(x):
x = torch.tensor(x)
return x
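# Illustrative usage of the (diffeq, init, solution) triples defined above
# (not part of the original problem set): integrate problem A1 with a
# fixed-step classical RK4 scheme and compare against its analytic solution.
# The step size, horizon, and tolerance below are arbitrary choices for the sketch.
def _rk4_step(f, t, y, h):
    k1 = f(t, y)
    k2 = f(t + h / 2, y + h / 2 * k1)
    k3 = f(t + h / 2, y + h / 2 * k2)
    k4 = f(t + h, y + h * k3)
    return y + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
if __name__ == '__main__':
    diffeq, init, solution = A1()
    t, y = init()
    h = torch.tensor(0.01)
    for _ in range(500):  # integrate from t = 0 to t = 5
        y = _rk4_step(diffeq, t, y, h)
        t = t + h
    assert torch.allclose(y, solution(t), atol=1e-4)
    print('A1 at t=%.2f: rk4=%.6f exact=%.6f' % (float(t), float(y), float(solution(t))))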
|
|
import utils
from config import Config
from parser import Parser
from dataset import DataSet
from network import Network
import eval_performance as perf
import sys
import time
import pickle
import threading
import numpy as np
import tensorflow as tf
from os import path
from copy import deepcopy
from tabulate import tabulate
class DeepDOPE(object):
def __init__(self, config):
self.config = config
self.patience = self.config.patience
self.learning_rate = self.config.solver.learning_rate
self.dataset = self.load_data()
self.ph_lr, self.ph_keep_prob_in, self.ph_keep_prob_out, self.ph_wce, self.ph_batch_size = self.get_placeholders()
# Setup Data Queue
self.ph_ids, self.ph_x_attr, self.ph_x_labels, self.ph_x_lengths, self.ph_y_label, \
self.ph_node_id = self.get_queue_placeholders()
self.Q, self.enqueue_op, self.dequeue_op = self.setup_data_queues()
self.ids, self.x_attr, self.x_labels, self.x_lengths, self.y_labels, self.node_id = self.dequeue_op
self.arch = self.add_network(config)
# Learn a representation for information diffusion across each path
self.neighbor_data, self.NOI_x, self.N_neighbors = self.arch.get_path_data(self.x_attr, self.x_labels,
self.x_lengths, self.ph_keep_prob_in, self.ph_keep_prob_out)
# Get Node of Interest's data
self.NOI_data = self.arch.get_NOI_data(self.NOI_x, self.ph_keep_prob_in)
self.path_ensemble_outputs = self.arch.attentive_ensemble(self.NOI_data, self.neighbor_data)
with tf.variable_scope('Predictions') as scope:
self.att_prediction = self.arch.predict(self.NOI_data, None, self.ph_keep_prob_out)
scope.reuse_variables()
# Get individual path predictions
self.path_predictions = self.arch.predict(self.neighbor_data, None, self.ph_keep_prob_out)
# combine diffusion over different paths attentively based on NOI
self.path_prediction = self.arch.predict(self.path_ensemble_outputs, None, self.ph_keep_prob_out)
        # combine the ensembled path data with the NOI data and predict labels
self.combined_prediction = self.arch.predict(self.NOI_data, self.path_ensemble_outputs, self.ph_keep_prob_out)
self.predictions = [self.att_prediction, self.path_prediction, self.combined_prediction]
#Losses
self.consensus_loss = self.arch.consensus_loss(self.path_predictions, self.path_prediction)
self.node_loss = self.arch.loss(self.att_prediction, self.y_labels, self.ph_wce)
self.path_loss = self.arch.loss(self.path_prediction, self.y_labels, self.ph_wce)
self.combined_loss = self.arch.loss(self.combined_prediction, self.y_labels, self.ph_wce)
self.total_loss = self.combined_loss + self.config.solver.path_loss*self.path_loss + \
self.config.solver.node_loss*self.node_loss + \
self.config.solver.consensus_loss*self.consensus_loss
self.losses = [self.node_loss, self.path_loss, self.combined_loss, self.total_loss, self.consensus_loss]
#Optimizer
self.optimizer = self.config.solver._optimizer(self.ph_lr)
train = self.arch.custom_training(self.total_loss, self.optimizer, self.config.batch_size)
self.reset_grads, self.accumulate_op, self.update_op = train
#self.train = self.arch.training(self.loss, self.optimizer)
self.saver = tf.train.Saver()
self.summary = tf.summary.merge_all()
self.step_incr_op = self.arch.global_step.assign(self.arch.global_step + 1)
self.init = tf.global_variables_initializer()
def load_and_enqueue(self, sess, data):
for idx, (ids, x_attr, x_labels, x_lengths, label, node_id) in enumerate(self.dataset.walks_generator(data)):
feed_dict = self.create_feed_dict([ids], [x_attr], [x_labels], [x_lengths], [label], [node_id])
sess.run(self.enqueue_op, feed_dict=feed_dict)
def load_data(self):
# Get the 'encoded data'
dataset = DataSet(self.config)
self.config.data_sets._len_labels = dataset.n_labels
self.config.data_sets._len_features = dataset.n_features
self.config.data_sets._multi_label = dataset.multi_label
self.config.data_sets._n_nodes = dataset.n_nodes
self.config.num_steps = dataset.diameter + 1
print('--------- Project Path: ' + self.config.codebase_root_path + self.config.project_name)
return dataset
def get_queue_placeholders(self):
        # 0th axis should have the same size for all tensors in the Queue
ids_placeholder = tf.placeholder(tf.int32, name='Walk_ids', shape=[1, self.config.num_steps, None])
x_attr_placeholder = tf.placeholder(tf.float32, name='Input',
shape=[1, self.config.num_steps, None, self.config.data_sets._len_features])
x_labels_placeholder = tf.placeholder(tf.float32, name='label_inputs',
shape=[1, self.config.num_steps, None, self.config.data_sets._len_labels])
x_lengths_placeholder = tf.placeholder(tf.int32, name='walk_lengths', shape=[1, None])
y_label_placeholder = tf.placeholder(tf.float32, name='Target', shape=[1, 1, self.config.data_sets._len_labels])
node_id_placeholder = tf.placeholder(tf.int32, name='node_id', shape=[1])
return ids_placeholder, x_attr_placeholder, x_labels_placeholder, x_lengths_placeholder, y_label_placeholder, node_id_placeholder
def get_placeholders(self):
lr = tf.placeholder(tf.float32, name='learning_rate')
keep_prob_in = tf.placeholder(tf.float32, name='keep_prob_in')
keep_prob_out = tf.placeholder(tf.float32, name='keep_prob_out')
batch_size = tf.placeholder(tf.float32, name='batch_size')
wce_placeholder = tf.placeholder(tf.float32, shape=[self.config.data_sets._len_labels], name='Cross_entropy_weights')
return lr, keep_prob_in, keep_prob_out, wce_placeholder, batch_size
def setup_data_queues(self):
Q = tf.FIFOQueue(capacity=50, dtypes=[tf.int32, tf.float32, tf.float32, tf.int32, tf.float32, tf.int32])
enqueue_op = Q.enqueue_many([self.ph_ids, self.ph_x_attr, self.ph_x_labels, self.ph_x_lengths,
self.ph_y_label, self.ph_node_id])
dequeue_op = Q.dequeue()
return Q, enqueue_op, dequeue_op
def create_feed_dict(self, ids, x_attr, x_labels, x_lengths, label_batch, node_id):
feed_dict = {
self.ph_ids: ids,
self.ph_x_attr: x_attr,
self.ph_x_labels: x_labels,
self.ph_x_lengths: x_lengths,
self.ph_y_label: label_batch,
self.ph_node_id: node_id,
self.ph_batch_size: self.config.batch_size}
return feed_dict
def add_network(self, config):
return Network(config)
def add_summaries(self, sess):
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer_train = tf.summary.FileWriter(self.config.logs_dir + "train", sess.graph)
summary_writer_val = tf.summary.FileWriter(self.config.logs_dir + "validation", sess.graph)
summary_writer_test = tf.summary.FileWriter(self.config.logs_dir + "test", sess.graph)
summary_writers = {'train': summary_writer_train, 'val': summary_writer_val, 'test': summary_writer_test}
return summary_writers
def add_metrics(self, metrics):
"""assign and add summary to a metric tensor"""
for i, metric in enumerate(self.config.metrics):
tf.summary.scalar(metric, metrics[i])
def print_metrics(self, inp):
for idx, item in enumerate(inp):
print(self.config.metrics[idx], ": ", item)
def run_epoch(self, sess, data, train_op=None, summary_writer=None, verbose=10, learning_rate=0):
train = train_op
if train_op is None:
train_op = tf.no_op()
keep_prob_in = 1
keep_prob_out = 1
else:
keep_prob_in = self.config.mRNN._keep_prob_in
keep_prob_out = self.config.mRNN._keep_prob_out
# Set up all variables
total_steps = np.sum(self.dataset.get_nodes(data)) # Number of Nodes to run through
verbose = min(verbose, total_steps) - 1
node_ids, gradients, targets, attn_values = [], [], [], []
        losses, predictions, metrics = dict(), dict(), dict()
metrics['node'], metrics['path'], metrics['combined'] = [], [], []
predictions['node'], predictions['path'], predictions['combined'] = [], [], []
losses['node'], losses['path'], losses['combined'], losses['consensus'], losses['total'] = [], [], [], [], []
########################################################################################################
feed_dict = {self.ph_keep_prob_in: keep_prob_in, self.ph_keep_prob_out: keep_prob_out,
self.ph_wce: self.dataset.wce, self.ph_lr: learning_rate}
# Reset grad accumulator at the beginning
sess.run([self.reset_grads], feed_dict=feed_dict)
#Start Running Queue
t = threading.Thread(target=self.load_and_enqueue, args=[sess, data])
t.daemon = True
t.start()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
step = 0
while step < total_steps:
step += 1
feed_dict = {self.ph_keep_prob_in: keep_prob_in, self.ph_keep_prob_out: keep_prob_out,
self.ph_wce: self.dataset.wce, self.ph_lr: learning_rate}
if step < total_steps - 1:
id, grads, t_losses, t_pred_probs, target_label, t_attn_values = \
sess.run([self.node_id, train_op, self.losses, self.predictions, self.y_labels,
self.arch.attn_values], feed_dict=feed_dict)
else:
summary, id, grads, t_losses, t_pred_probs, target_label, t_attn_values = \
sess.run([self.summary, self.node_id, train_op, self.losses, self.predictions, self.y_labels,
self.arch.attn_values], feed_dict=feed_dict)
if summary_writer is not None:
summary_writer.add_summary(summary, self.arch.global_step.eval(session=sess))
summary_writer.flush()
node_ids.append(id)
# Accumulate attention values
attn_values.append(t_attn_values)
# Accumulate losses
losses['node'].append(t_losses[0])
losses['path'].append(t_losses[1])
losses['combined'].append(t_losses[2])
losses['total'].append(t_losses[3])
losses['consensus'].append(t_losses[4])
# Accumulate Predictions
for i, k in enumerate(predictions.keys()):
pred_labels = np.zeros([self.config.data_sets._len_labels], dtype=np.int32)
pred_labels[np.argmax(t_pred_probs[i])] = 1
predictions[k].append(pred_labels.copy())
targets.append(np.squeeze(target_label))
if train is not None:
# get the absolute maximum gradient to each variable
gradients.append([np.max(np.abs(item)) for item in grads])
if train and (step % self.config.batch_size == 0 or step == total_steps):
# Update gradients after batch_size or at the end of the current epoch
batch_size = self.config.batch_size
if step == total_steps:
batch_size = step%batch_size
feed_dict[self.ph_batch_size] = batch_size
sess.run([self.update_op], feed_dict=feed_dict)
sess.run([self.reset_grads], feed_dict=feed_dict)
if verbose and self.config.solver.gradients:
print("%d/%d :: " % (step, total_steps), end="")
for var, val in zip(['-'.join(k.name.split('/')[-2:]) for k in tf.trainable_variables()],
np.mean(gradients, axis=0)):
print("%s :: %.8f " % (var, val / self.config.batch_size), end="")
print("\n")
sys.stdout.flush()
# Average statistics over batches
for k in losses.keys():
losses[k] = np.mean(losses[k])
for k in metrics.keys():
metrics[k] = perf.evaluate(np.asarray(predictions[k]), np.asarray(targets), 0)
coord.request_stop()
coord.join(threads)
return node_ids, predictions, losses, metrics, np.asarray(attn_values)
def fit(self, sess, summary_writers):
patience = self.config.patience
learning_rate = self.config.solver.learning_rate
inner_epoch, best_epoch, best_val_loss = 0, 0, 1e6
nodes = {'train': None, 'val': None, 'test': None}
losses = {'train': None, 'val': None, 'test': None}
metrics = {'train': None, 'val': None, 'test': None}
attn_values = {'train': None, 'val': None, 'test': None}
predictions = {'train': None, 'val': None, 'test': None}
best_losses, best_metrics, best_predictions = deepcopy(losses), deepcopy(metrics), deepcopy(predictions)
while inner_epoch < self.config.max_inner_epochs:
inner_epoch += 1
nodes['train'], predictions['train'], losses['train'], metrics['train'], attn_values['train'] = \
self.run_epoch(sess, data='train', train_op=self.accumulate_op, summary_writer=summary_writers['train'],
learning_rate=learning_rate)
if inner_epoch % self.config.val_epochs_freq == 0:
nodes['val'], predictions['val'], losses['val'], metrics['val'], attn_values['val'] = \
self.run_epoch(sess, data='val', train_op=None, summary_writer=summary_writers['val'], verbose=0)
if self.config.run_test:
nodes['test'], predictions['test'], losses['test'], metrics['test'], attn_values['test'] = \
self.run_epoch(sess, data='test', train_op=None, summary_writer=summary_writers['test'], verbose=0)
self.print_inner_loop_stats(inner_epoch, metrics, losses)
else:
print('---------- Epoch %d: tr_loss = %.2f val_loss %.2f || tr_micro = %.2f, val_micro = %.2f || '
'tr_acc = %.2f, val_acc = %.2f ' %
(inner_epoch, losses['train']['combined'], losses['val']['combined'],
metrics['train']['combined']['micro_f1'], metrics['val']['combined']['micro_f1'],
metrics['train']['combined']['accuracy'], metrics['val']['combined']['accuracy']))
new_val_loss = losses['val']['node'] + losses['val']['combined'] + losses['val']['path']
if new_val_loss < best_val_loss:
if new_val_loss < (best_val_loss * self.config.improvement_threshold):
self.patience = self.config.patience
best_epoch = inner_epoch
best_losses = losses
best_metrics = metrics
best_predictions = predictions
self.saver.save(sess, self.config.ckpt_dir + 'inner-last-best')
best_val_loss = new_val_loss
else:
if patience < 1:
# Restore the best parameters
self.saver.restore(sess, self.config.ckpt_dir + 'inner-last-best')
if learning_rate <= 0.00001:
print('Stopping by patience method')
break
else:
learning_rate /= 10
patience = self.config.patience
print('Learning rate dropped to %.8f' % learning_rate)
else:
patience -= 1
print('Best epoch: ', best_epoch)
# Run Test set
if not self.config.run_test:
nodes['test'], best_predictions['test'], losses['test'], best_metrics['test'], attn_values['test'] = \
self.run_epoch(sess, data='test', train_op=None, summary_writer=summary_writers['test'], verbose=0)
# UPDATE LABEL CACHE
self.dataset.update_label_cache('train', best_predictions['train']['combined'], ids=nodes['train'])
self.dataset.update_label_cache('val', best_predictions['val']['combined'], ids=nodes['val'])
self.dataset.update_label_cache('test', best_predictions['test']['combined'], ids=nodes['test'])
return inner_epoch, nodes, best_losses, best_metrics, attn_values
def fit_outer(self, sess, summary_writers):
stats = []
outer_epoch = 1
flag = self.config.boot_reset
patience = 1
metrics = {'train': None, 'val': None, 'test': None}
best_val_loss, best_metrics, best_attn_values = 1e6, None, None
while outer_epoch <= self.config.max_outer_epochs:
print('OUTER_EPOCH: ', outer_epoch)
            if outer_epoch == 2 and flag:  # reset after the first bootstrap (open question: should the weights be reused?)
print("------ Graph Reset | First bootstrap done -----")
sess.run(self.init) # reset all weights
flag = False
# Just to monitor the trainable variables in tf graph
# print([v.name for v in tf.trainable_variables()], "\n")
start = time.time()
# Fit the model to predict best possible labels given the current estimates of unlabeled values
inner_epoch, nodes, losses, metrics, attn_values = self.fit(sess, summary_writers)
duration = time.time() - start
stats.append(
np.round([outer_epoch, inner_epoch,
losses['train']['combined'], losses['val']['combined'], losses['test']['combined'],
metrics['train']['combined']['micro_f1'], metrics['val']['combined']['micro_f1'], metrics['test']['combined']['micro_f1'],
metrics['train']['combined']['accuracy'], metrics['val']['combined']['accuracy'], metrics['test']['combined']['accuracy'],
duration], decimals=3))
print('Outer Epoch %d: tr_loss = %.2f, val_loss %.3f te_loss %.3f|| '
'tr_micro = %.2f, val_micro = %.2f te_micro = %.3f|| '
'tr_acc = %.2f, val_acc = %.2f te_acc = %.3f (%.3f sec)' %
(inner_epoch, losses['train']['combined'], losses['val']['combined'], losses['test']['combined'],
metrics['train']['combined']['micro_f1'], metrics['val']['combined']['micro_f1'], metrics['test']['combined']['micro_f1'],
metrics['train']['combined']['accuracy'], metrics['val']['combined']['accuracy'], metrics['test']['combined']['accuracy'],
duration))
new_val_loss = losses['val']['combined'] + losses['train']['combined']
if patience >= 1 and (new_val_loss < best_val_loss):
if new_val_loss < (best_val_loss * self.config.improvement_threshold):
patience = 2
best_metrics = metrics
best_attn_values = attn_values
best_val_loss = new_val_loss
else:
patience -= 1
if patience < 1:
break
outer_epoch += 1
        headers = ['Epoch', 'I_Epoch', 'TR_LOSS', 'VAL_LOSS', 'TE_LOSS', 'TR_MICRO', 'VAL_MICRO', 'TE_MICRO',
                   'TR_ACC', 'VAL_ACC', 'TE_ACC', 'DURATION']
stats = tabulate(stats, headers)
print(stats)
print('Best Test Results || Accuracy %.3f | MICRO %.3f | MACRO %.3f' %
(metrics['test']['combined']['accuracy'], metrics['test']['combined']['micro_f1'], metrics['test']['combined']['macro_f1']))
return stats, nodes, best_metrics, best_attn_values
def print_inner_loop_stats(self, inner_epoch, metrics, losses):
print('---------- Epoch %d: tr_loss = %.2f val_loss %.2f te_loss %.2f ||'
' tr_micro = %.2f, val_micro = %.2f te_micro = %.2f|| '
'tr_acc = %.2f, val_acc = %.2f te_acc = %.2f ' %
(inner_epoch, losses['train']['combined'], losses['val']['combined'], losses['test']['combined'],
metrics['train']['combined']['micro_f1'], metrics['val']['combined']['micro_f1'], metrics['test']['combined']['micro_f1'],
metrics['train']['combined']['accuracy'], metrics['val']['combined']['accuracy'], metrics['test']['combined']['accuracy']))
print('########################################################################################')
print('#~~~~~~~~~~~~~~~~~~~ tr_node_loss = %.2f val_node_loss %.2f te_node_loss %.2f ||\n'
'#~~~~~~~~~~~~~~~~~~~ tr_path_loss = %.2f val_path_loss %.2f te_path_loss %.2f ||\n'
'#~~~~~~~~~~~~~~~~~~~ tr_comb_loss = %.2f val_comb_loss %.2f te_comb_loss %.2f ||\n'
'#~~~~~~~~~~~~~~~~~~~ tr_consensus_loss = %.2f val_consensus_loss %.2f te_consensus_loss %.2f ||\n'
'#~~~~~~~~~~~~~~~~~~~ tr_total_loss = %.2f val_total_loss %.2f te_total_loss %.2f' %
(losses['train']['consensus'], losses['val']['consensus'], losses['test']['consensus'],
losses['train']['node'], losses['val']['node'], losses['test']['node'],
losses['train']['path'], losses['val']['path'], losses['test']['path'],
losses['train']['combined'], losses['val']['combined'], losses['test']['combined'],
losses['train']['total'], losses['val']['total'], losses['test']['total']))
print('########################################################################################')
print('#~~~~~~~~~~~~~~~~~~~ tr_node_acc %.2f val_node_acc %.2f te_node_acc %.2f ||\n'
'#~~~~~~~~~~~~~~~~~~~ tr_path_acc %.2f val_path_acc %.2f te_path_acc %.2f ||\n'
'#~~~~~~~~~~~~~~~~~~~ tr_comb_acc %.2f val_comb_acc %.2f te_comb_acc %.2f ' %
(metrics['train']['node']['accuracy'], metrics['val']['node']['accuracy'], metrics['test']['node']['accuracy'],
metrics['train']['path']['accuracy'], metrics['val']['path']['accuracy'], metrics['test']['path']['accuracy'],
metrics['train']['combined']['accuracy'], metrics['val']['combined']['accuracy'], metrics['test']['combined']['accuracy']))
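# Network.custom_training() lives in network.py and is not shown in this file;
# run_epoch() above only relies on it returning a (reset_grads, accumulate_op,
# update_op) triple that is run once per node and applied every batch_size
# steps. The helper below is a minimal, hypothetical sketch of the standard
# TF1 gradient-accumulation pattern such a method could build; the name
# _gradient_accumulation_ops and its exact form are assumptions, not the
# project's actual implementation.
def _gradient_accumulation_ops(loss, optimizer, batch_size):
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    # keep only variables that actually receive a gradient
    grads_and_vars = [(g, v) for g, v in zip(grads, tvars) if g is not None]
    # one non-trainable accumulator per variable
    accum = [tf.Variable(tf.zeros_like(v.initialized_value()), trainable=False) for _, v in grads_and_vars]
    reset_op = [a.assign(tf.zeros_like(a)) for a in accum]
    accumulate_op = [a.assign_add(g) for a, (g, _) in zip(accum, grads_and_vars)]
    # batch_size may be a placeholder (cf. ph_batch_size above) so the final,
    # possibly partial batch is still averaged correctly before the update
    update_op = optimizer.apply_gradients([(a / batch_size, v) for a, (_, v) in zip(accum, grads_and_vars)])
    return reset_op, accumulate_op, update_op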
def init_model(config):
tf.reset_default_graph()
tf.set_random_seed(1234)
with tf.variable_scope('DEEP_DOPE', reuse=None) as scope:
model = DeepDOPE(config)
tf_config = tf.ConfigProto(allow_soft_placement=True)
tf_config.gpu_options.allow_growth = True
sm = tf.train.SessionManager()
if config.retrain:
print("Loading model from checkpoint")
load_ckpt_dir = config.ckpt_dir
else:
print("No model loaded from checkpoint")
load_ckpt_dir = ''
sess = sm.prepare_session("", init_op=model.init, saver=model.saver, checkpoint_dir=load_ckpt_dir, config=tf_config)
return model, sess
def train_model(cfg):
print('############## Training Module ')
config = deepcopy(cfg)
model, sess = init_model(config)
with sess:
summary_writers = model.add_summaries(sess)
stats, nodes, test_metrics, attn_values = model.fit_outer(sess, summary_writers)
return stats, nodes, test_metrics, attn_values
def main():
args = Parser().get_parser().parse_args()
print("=====Configurations=====\n", args)
cfg = Config(args)
train_percents = args.percents.split('_')
folds = args.folds.split('_')
outer_loop_stats = {}
attention = {}
results = {}
nodes = {}
#Create Main directories
path_prefixes = [cfg.dataset_name, cfg.folder_suffix, cfg.data_sets.label_type]
utils.create_directory_tree(path_prefixes)
for train_percent in train_percents:
cfg.train_percent = train_percent
path_prefix = path.join(path.join(*path_prefixes), cfg.train_percent)
utils.check_n_create(path_prefix)
attention[train_percent] = {}
results[train_percent] = {}
outer_loop_stats[train_percent] = {}
nodes[train_percent] = {}
for fold in folds:
print('Training percent: ', train_percent, ' Fold: ', fold, '---Running')
cfg.train_fold = fold
utils.check_n_create(path.join(path_prefix, cfg.train_fold))
cfg.create_directories(path.join(path_prefix, cfg.train_fold))
outer_loop_stats[train_percent][fold], nodes[train_percent][fold], results[train_percent][fold], \
attention[train_percent][fold] = train_model(cfg)
print('Training percent: ', train_percent, ' Fold: ', fold, '---completed')
path_prefixes = [cfg.dataset_name, cfg.folder_suffix, cfg.data_sets.label_type]
np.save(path.join(*path_prefixes, 'nodes.npy'), nodes)
np.save(path.join(*path_prefixes, 'results.npy'), results)
np.save(path.join(*path_prefixes, 'attentions.npy'), attention)
np.save(path.join(*path_prefixes, 'outer_loop_stats.npy'), outer_loop_stats)
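# Illustrative helper (not called anywhere in this script): the np.save calls
# above pickle nested Python dicts keyed by train percent and fold, so they
# can be read back with np.load(..., allow_pickle=True).item() on recent numpy
# versions. The function name and its arguments are assumptions.
def load_saved_results(dataset_name, folder_suffix, label_type):
    prefix = path.join(dataset_name, folder_suffix, label_type)
    results = np.load(path.join(prefix, 'results.npy'), allow_pickle=True).item()
    attention = np.load(path.join(prefix, 'attentions.npy'), allow_pickle=True).item()
    return results, attention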
if __name__ == "__main__":
np.random.seed(1234)
main()
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for common methods used by iLO modules."""
import mock
import tempfile
from oslo.config import cfg
from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.db import api as dbapi
from ironic.drivers.modules import agent
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import deploy as ilo_deploy
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import context
from ironic.openstack.common import importutils
from ironic.tests import base
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
ilo_client = importutils.try_import('proliantutils.ilo.ribcl')
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
class IloDeployPrivateMethodsTestCase(base.TestCase):
def setUp(self):
super(IloDeployPrivateMethodsTestCase, self).setUp()
self.dbapi = dbapi.get_instance()
self.context = context.get_admin_context()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo', driver_info=INFO_DICT)
def test__get_boot_iso_object_name(self):
boot_iso_actual = ilo_deploy._get_boot_iso_object_name(self.node)
boot_iso_expected = "boot-%s" % self.node.uuid
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(images, 'get_glance_image_property')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
def test__get_boot_iso_glance_image(self, deploy_info_mock,
image_prop_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid'}
image_prop_mock.return_value = 'boot-iso-uuid'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_prop_mock.assert_called_once_with(task.context, 'image-uuid',
'boot_iso')
boot_iso_expected = 'glance:boot-iso-uuid'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(driver_utils, 'get_node_capability')
@mock.patch.object(images, 'get_glance_image_property')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
def test__get_boot_iso_uefi_no_glance_image(self, deploy_info_mock,
image_prop_mock, get_node_cap_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid'}
image_prop_mock.return_value = None
get_node_cap_mock.return_value = 'uefi'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_result = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_prop_mock.assert_called_once_with(task.context, 'image-uuid',
'boot_iso')
get_node_cap_mock.assert_called_once_with(task.node, 'boot_mode')
self.assertIsNone(boot_iso_result)
@mock.patch.object(tempfile, 'NamedTemporaryFile')
@mock.patch.object(images, 'create_boot_iso')
@mock.patch.object(swift, 'SwiftAPI')
@mock.patch.object(ilo_deploy, '_get_boot_iso_object_name')
@mock.patch.object(images, 'get_glance_image_property')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
def test__get_boot_iso_create(self, deploy_info_mock, image_prop_mock,
boot_object_name_mock, swift_api_mock,
create_boot_iso_mock, tempfile_mock):
CONF.keystone_authtoken.auth_uri = 'http://authurl'
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.pxe.pxe_append_params = 'kernel-params'
swift_obj_mock = swift_api_mock.return_value
fileobj_mock = mock.MagicMock()
fileobj_mock.name = 'tmpfile'
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
deploy_info_mock.return_value = {'image_source': 'image-uuid'}
image_prop_mock.side_effect = [None, 'kernel-uuid', 'ramdisk-uuid']
boot_object_name_mock.return_value = 'abcdef'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_prop_mock.assert_any_call(task.context, 'image-uuid',
'boot_iso')
image_prop_mock.assert_any_call(task.context, 'image-uuid',
'kernel_id')
image_prop_mock.assert_any_call(task.context, 'image-uuid',
'ramdisk_id')
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile', 'kernel-uuid', 'ramdisk-uuid',
'root-uuid', 'kernel-params')
swift_obj_mock.create_object.assert_called_once_with('ilo-cont',
'abcdef',
'tmpfile')
boot_iso_expected = 'swift:abcdef'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(ilo_deploy, '_get_boot_iso_object_name')
@mock.patch.object(swift, 'SwiftAPI')
def test__clean_up_boot_iso_for_instance(self, swift_mock,
boot_object_name_mock):
swift_obj_mock = swift_mock.return_value
CONF.ilo.swift_ilo_container = 'ilo-cont'
boot_object_name_mock.return_value = 'boot-object'
ilo_deploy._clean_up_boot_iso_for_instance(self.node)
swift_obj_mock.delete_object.assert_called_once_with('ilo-cont',
'boot-object')
def test__get_single_nic_with_vif_port_id(self):
obj_utils.create_test_port(self.context, node_id=self.node.id, id=6,
address='aa:bb:cc', uuid=utils.generate_uuid(),
extra={'vif_port_id': 'test-vif-A'}, driver='iscsi_ilo')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
address = ilo_deploy._get_single_nic_with_vif_port_id(task)
self.assertEqual('aa:bb:cc', address)
@mock.patch.object(deploy_utils, 'check_for_missing_params')
def test__parse_driver_info(self, check_params_mock):
self.node.driver_info['ilo_deploy_iso'] = 'deploy-iso-uuid'
driver_info_expected = {'ilo_deploy_iso': 'deploy-iso-uuid'}
driver_info_actual = ilo_deploy._parse_driver_info(self.node)
error_msg = 'Error validating iLO virtual media deploy'
check_params_mock.assert_called_once_with(driver_info_expected,
error_msg)
self.assertEqual(driver_info_expected, driver_info_actual)
@mock.patch.object(ilo_deploy, '_parse_driver_info')
@mock.patch.object(iscsi_deploy, 'parse_instance_info')
def test__parse_deploy_info(self, instance_info_mock, driver_info_mock):
instance_info_mock.return_value = {'a': 'b'}
driver_info_mock.return_value = {'c': 'd'}
expected_info = {'a': 'b', 'c': 'd'}
actual_info = ilo_deploy._parse_deploy_info(self.node)
self.assertEqual(expected_info, actual_info)
@mock.patch.object(manager_utils, 'node_power_action')
@mock.patch.object(ilo_common, 'set_boot_device')
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot')
def test__reboot_into(self, setup_vmedia_mock, set_boot_device_mock,
node_power_action_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
opts = {'a': 'b'}
ilo_deploy._reboot_into(task, 'iso', opts)
setup_vmedia_mock.assert_called_once_with(task, 'iso', opts)
set_boot_device_mock.assert_called_once_with(task.node, 'CDROM')
node_power_action_mock.assert_called_once_with(task, states.REBOOT)
class IloVirtualMediaIscsiDeployTestCase(base.TestCase):
def setUp(self):
super(IloVirtualMediaIscsiDeployTestCase, self).setUp()
self.dbapi = dbapi.get_instance()
self.context = context.get_admin_context()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo', driver_info=INFO_DICT)
@mock.patch.object(driver_utils, 'validate_boot_mode_capability')
@mock.patch.object(iscsi_deploy, 'validate_glance_image_properties')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
@mock.patch.object(iscsi_deploy, 'validate')
def test_validate(self, validate_mock, deploy_info_mock,
validate_prop_mock, validate_boot_mode_mock):
d_info = {'a': 'b'}
deploy_info_mock.return_value = d_info
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
validate_mock.assert_called_once_with(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(task.context,
d_info, ['kernel_id', 'ramdisk_id'])
validate_boot_mode_mock.assert_called_once_with(task.node)
@mock.patch.object(ilo_deploy, '_reboot_into')
@mock.patch.object(ilo_deploy, '_get_single_nic_with_vif_port_id')
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options')
@mock.patch.object(manager_utils, 'node_power_action')
@mock.patch.object(ilo_common, 'set_boot_device')
@mock.patch.object(iscsi_deploy, 'check_image_size')
@mock.patch.object(iscsi_deploy, 'cache_instance_image')
def test_deploy(self, cache_instance_image_mock, check_image_size_mock,
set_boot_device_mock, node_power_action_mock,
build_opts_mock, get_nic_mock, reboot_into_mock):
deploy_opts = {'a': 'b'}
build_opts_mock.return_value = deploy_opts
get_nic_mock.return_value = '12:34:56:78:90:ab'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = 'deploy-iso'
returned_state = task.driver.deploy.deploy(task)
node_power_action_mock.assert_any_call(task, states.POWER_OFF)
cache_instance_image_mock.assert_called_once_with(task.context,
task.node)
check_image_size_mock.assert_called_once_with(task)
expected_ramdisk_opts = {'a': 'b', 'BOOTIF': '12:34:56:78:90:ab'}
build_opts_mock.assert_called_once_with(task.node, task.context)
get_nic_mock.assert_called_once_with(task)
reboot_into_mock.assert_called_once_with(task, 'glance:deploy-iso',
expected_ramdisk_opts)
self.assertEqual(states.DEPLOYWAIT, returned_state)
@mock.patch.object(manager_utils, 'node_power_action')
def test_tear_down(self, node_power_action_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(ilo_deploy, '_clean_up_boot_iso_for_instance')
@mock.patch.object(iscsi_deploy, 'destroy_images')
def test_clean_up(self, destroy_images_mock, clean_up_boot_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.clean_up(task)
destroy_images_mock.assert_called_once_with(task.node.uuid)
clean_up_boot_mock.assert_called_once_with(task.node)
class IloVirtualMediaAgentDeployTestCase(base.TestCase):
def setUp(self):
super(IloVirtualMediaAgentDeployTestCase, self).setUp()
self.dbapi = dbapi.get_instance()
self.context = context.get_admin_context()
mgr_utils.mock_the_extension_manager(driver="agent_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='agent_ilo', driver_info=INFO_DICT)
@mock.patch.object(ilo_deploy, '_parse_driver_info')
def test_validate(self, parse_driver_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
parse_driver_info_mock.assert_called_once_with(task.node)
@mock.patch.object(ilo_deploy, '_reboot_into')
@mock.patch.object(agent, 'build_agent_options')
def test_deploy(self, build_options_mock, reboot_into_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
deploy_opts = {'a': 'b'}
build_options_mock.return_value = deploy_opts
task.node.driver_info['ilo_deploy_iso'] = 'deploy-iso-uuid'
returned_state = task.driver.deploy.deploy(task)
build_options_mock.assert_called_once_with()
reboot_into_mock.assert_called_once_with(task,
'glance:deploy-iso-uuid',
deploy_opts)
self.assertEqual(states.DEPLOYWAIT, returned_state)
@mock.patch.object(manager_utils, 'node_power_action')
def test_tear_down(self, node_power_action_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(agent, 'build_instance_info_for_deploy')
def test_prepare(self, build_instance_info_mock):
deploy_opts = {'a': 'b'}
build_instance_info_mock.return_value = deploy_opts
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.prepare(task)
self.assertEqual(deploy_opts, task.node.instance_info)
class VendorPassthruTestCase(base.TestCase):
def setUp(self):
super(VendorPassthruTestCase, self).setUp()
self.dbapi = dbapi.get_instance()
self.context = context.get_admin_context()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo', driver_info=INFO_DICT)
@mock.patch.object(deploy_utils, 'notify_deploy_complete')
@mock.patch.object(ilo_common, 'set_boot_device')
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot')
@mock.patch.object(ilo_deploy, '_get_boot_iso')
@mock.patch.object(iscsi_deploy, 'continue_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test__continue_deploy_good(self, cleanup_vmedia_boot_mock,
continue_deploy_mock, get_boot_iso_mock,
setup_vmedia_mock, set_boot_device_mock,
notify_deploy_complete_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = 'root-uuid'
get_boot_iso_mock.return_value = 'boot-iso'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
vendor = ilo_deploy.VendorPassthru()
vendor._continue_deploy(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
get_boot_iso_mock.assert_called_once_with(task, 'root-uuid')
setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
set_boot_device_mock.assert_called_once_with(task.node, 'CDROM')
self.assertEqual('boot-iso',
task.node.instance_info['ilo_boot_iso'])
notify_deploy_complete_mock.assert_called_once_with('123456')
@mock.patch.object(ilo_deploy, 'LOG')
def test__continue_deploy_bad(self, log_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.NOSTATE
vendor = ilo_deploy.VendorPassthru()
vendor._continue_deploy(task, **kwargs)
self.assertTrue(log_mock.error.called)
@mock.patch.object(iscsi_deploy, 'continue_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test__continue_deploy_deploy_no_boot_media(self,
cleanup_vmedia_boot_mock, continue_deploy_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
vendor = ilo_deploy.VendorPassthru()
vendor._continue_deploy(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
class IloPXEDeployTestCase(base.TestCase):
def setUp(self):
super(IloPXEDeployTestCase, self).setUp()
self.dbapi = dbapi.get_instance()
self.context = context.get_admin_context()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='pxe_ilo', driver_info=INFO_DICT)
@mock.patch.object(pxe.PXEDeploy, 'validate')
@mock.patch.object(driver_utils, 'validate_boot_mode_capability')
def test_validate(self, boot_mode_mock, pxe_validate_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
boot_mode_mock.assert_called_once_with(task.node)
pxe_validate_mock.assert_called_once_with(task)
@mock.patch.object(pxe.PXEDeploy, 'prepare')
@mock.patch.object(ilo_common, 'set_boot_mode')
@mock.patch.object(driver_utils, 'get_node_capability')
def test_prepare(self, node_capability_mock,
set_boot_mode_mock, pxe_prepare_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
node_capability_mock.return_value = 'uefi'
task.driver.deploy.prepare(task)
node_capability_mock.assert_called_once_with(task.node,
'boot_mode')
set_boot_mode_mock.assert_called_once_with(task.node, 'uefi')
pxe_prepare_mock.assert_called_once_with(task)
@mock.patch.object(pxe.PXEDeploy, 'prepare')
@mock.patch.object(ilo_common, 'update_boot_mode_capability')
@mock.patch.object(driver_utils, 'get_node_capability')
def test_prepare_boot_mode_doesnt_exist(self, node_capability_mock,
update_capability_mock,
pxe_prepare_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
node_capability_mock.return_value = None
task.driver.deploy.prepare(task)
update_capability_mock.assert_called_once_with(task)
pxe_prepare_mock.assert_called_once_with(task)
@mock.patch.object(pxe.PXEDeploy, 'deploy')
@mock.patch.object(ilo_common, 'set_boot_device')
def test_deploy_boot_mode_exists(self, set_persistent_mock,
pxe_deploy_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.deploy(task)
set_persistent_mock.assert_called_with(task.node, 'NETWORK', False)
pxe_deploy_mock.assert_called_once_with(task)
class IloPXEVendorPassthruTestCase(base.TestCase):
def setUp(self):
super(IloPXEVendorPassthruTestCase, self).setUp()
self.dbapi = dbapi.get_instance()
self.context = context.get_admin_context()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='pxe_ilo', driver_info=INFO_DICT)
@mock.patch.object(pxe.VendorPassthru, 'vendor_passthru')
@mock.patch.object(ilo_common, 'set_boot_device')
def test_vendorpassthru(self, set_persistent_mock,
pxe_vendorpassthru_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
task.driver.vendor.vendor_passthru(task, **kwargs)
set_persistent_mock.assert_called_with(task.node, 'NETWORK', True)
pxe_vendorpassthru_mock.assert_called_once_with(task, **kwargs)
|
|
#!/usr/bin/env python
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import scipy
import pylab
import scipy.cluster.hierarchy as sch
from scipy import stats
# User defined color maps (in addition to matplotlib ones)
bbcyr = {'red': ( (0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.50, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ( (0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.50, 1.0, 1.0),
(0.75, 1.0, 1.0),
(1.0, 0.0, 1.0)),
'blue': ( (0.0, 0.0, 0.0),
(0.25, 1.0, 1.0),
(0.50, 1.0, 1.0),
(0.75, 0.0, 0.0),
(1.0, 0.0, 1.0))}
bbcry = {'red': ( (0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.50, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ( (0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.50, 1.0, 1.0),
(0.75, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ( (0.0, 0.0, 0.0),
(0.25, 1.0, 1.0),
(0.50, 1.0, 1.0),
(0.75, 0.0, 0.0),
(1.0, 0.0, 1.0))}
my_colormaps = [ ('bbcyr',bbcyr),
('bbcry',bbcry)]
tax_units = "kpcofgs"
def read_params(args):
import argparse as ap
import textwrap
    p = ap.ArgumentParser( description= "This script generates heatmaps with hierarchical clustering \n"
                                        "of both samples and microbial clades. The script can also subsample \n"
                                        "the number of clades to display based on their nth percentile \n"
                                        "abundance value in each sample\n" )
p.add_argument( '--in', metavar='INPUT_FILE', type=str, default=None, required = True,
help= "The input file of microbial relative abundances. \n"
"This file is typically obtained with the \"utils/merge_metaphlan_tables.py\"\n")
p.add_argument( '--out', metavar='OUTPUT_FILE', type=str, default=None, required = True,
help= "The output image. \n"
"The extension of the file determines the image format. png, pdf, and svg are the preferred format" )
p.add_argument( '-m', type=str,
choices=[ "single","complete","average",
"weighted","centroid","median",
"ward" ],
default="average",
help = "The hierarchical clustering method, default is \"average\"\n" )
dist_funcs = [ "euclidean","minkowski","cityblock","seuclidean",
"sqeuclidean","cosine","correlation","hamming",
"jaccard","chebyshev","canberra","braycurtis",
"mahalanobis","yule","matching","dice",
"kulsinski","rogerstanimoto","russellrao","sokalmichener",
"sokalsneath","wminkowski","ward"]
p.add_argument( '-d', type=str, choices=dist_funcs, default="braycurtis",
help="The distance function for samples. Default is \"braycurtis\"")
p.add_argument( '-f', type=str, choices=dist_funcs, default="correlation",
help="The distance function for microbes. Default is \"correlation\"")
p.add_argument( '-s', metavar='scale norm', type=str,
default = 'lin', choices = ['log','lin'])
p.add_argument( '-x', type=float, default = 0.1,
help="Width of heatmap cells. Automatically set, this option should not be necessary unless for very large heatmaps")
p.add_argument( '-y', type=float, default = 0.1,
help="Height of heatmap cells. Automatically set, this option should not be necessary unless for very large heatmaps")
p.add_argument( '--minv', type=float, default = 0.0,
help="Minimum value to display. Default is 0.0, values around 0.001 are also reasonable")
p.add_argument( '--maxv', metavar='max value', type=float,
help="Maximum value to display. Default is maximum value present, can be set e.g. to 100 to display the full scale")
p.add_argument( '--tax_lev', metavar='TAXONOMIC_LEVEL', type=str,
choices='a'+tax_units, default='s', help =
"The taxonomic level to display:\n"
"'a' : all taxonomic levels\n"
"'k' : kingdoms (Bacteria and Archaea) only\n"
"'p' : phyla only\n"
"'c' : classes only\n"
"'o' : orders only\n"
"'f' : families only\n"
"'g' : genera only\n"
"'s' : species only\n"
"[default 's']" )
p.add_argument( '--perc', type=int, default=None,
help="Percentile to be used for ordering the microbes in order to select with --top the most abundant microbes only. Default is 90")
p.add_argument( '--top', type=int, default=None,
help="Display the --top most abundant microbes only (ordering based on --perc)")
p.add_argument( '--sdend_h', type=float, default = 0.1,
help="Set the height of the sample dendrogram. Default is 0.1")
p.add_argument( '--fdend_w', type=float, default = 0.1,
help="Set the width of the microbes dendrogram. Default is 0.1")
p.add_argument( '--cm_h', type=float, default = 0.03,
help="Set the height of the colormap. Default = 0.03" )
p.add_argument( '--cm_ticks', metavar='label for ticks of the colormap', type=str,
default = None )
p.add_argument( '--font_size', type=int, default = 7,
help = "Set label font sizes. Default is 7\n" )
p.add_argument( '--clust_line_w', type=float, default = 1.0,
help="Set the line width for the dendrograms" )
col_maps = ['Accent', 'Blues', 'BrBG', 'BuGn', 'BuPu', 'Dark2', 'GnBu',
'Greens', 'Greys', 'OrRd', 'Oranges', 'PRGn', 'Paired',
'Pastel1', 'Pastel2', 'PiYG', 'PuBu', 'PuBuGn', 'PuOr',
'PuRd', 'Purples', 'RdBu', 'RdGy', 'RdPu', 'RdYlBu', 'RdYlGn',
'Reds', 'Set1', 'Set2', 'Set3', 'Spectral', 'YlGn', 'YlGnBu',
'YlOrBr', 'YlOrRd', 'afmhot', 'autumn', 'binary', 'bone',
'brg', 'bwr', 'cool', 'copper', 'flag', 'gist_earth',
'gist_gray', 'gist_heat', 'gist_ncar', 'gist_rainbow',
'gist_stern', 'gist_yarg', 'gnuplot', 'gnuplot2', 'gray',
'hot', 'hsv', 'jet', 'ocean', 'pink', 'prism', 'rainbow',
'seismic', 'spectral', 'spring', 'summer', 'terrain', 'winter'] + [n for n,c in my_colormaps]
p.add_argument( '-c', type=str, choices = col_maps, default = 'jet',
help="Set the colormap. Default is \"jet\"." )
return vars(p.parse_args())
# Predefined colors for dendrogram branches and class labels
colors = [ "#B22222","#006400","#0000CD","#9400D3","#696969","#8B4513",
"#FF1493","#FF8C00","#3CB371","#00Bfff","#CDC9C9","#FFD700",
"#2F4F4F","#FF0000","#ADFF2F","#B03060" ]
def samples2classes_panel(fig, samples, s2l, idx1, idx2, height, xsize, cols, legendon, fontsize, label2cols, legend_ncol ):
from matplotlib.patches import Rectangle
samples2labels = dict([(s,l)
for s,l in [ll.strip().split('\t')
for ll in open(s2l)]])
if label2cols:
labels2colors = dict([(l[0],l[1]) for l in [ll.strip().split('\t') for ll in open(label2cols)]])
else:
cs = cols if cols else colors
labels2colors = dict([(l,cs[i%len(cs)]) for i,l in enumerate(set(samples2labels.values()))])
ax1 = fig.add_axes([0.,1.0,1.0,height],frameon=False)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_ylim( [0.0, height] )
ax1.set_xlim( [0.0, xsize] )
step = xsize / float(len(samples))
labels = set()
added_labels = set()
for i,ind in enumerate(idx2):
if not samples[ind] in samples2labels or \
not samples2labels[samples[ind]] in labels2colors:
fc, ll = None, None
else:
ll = samples2labels[samples[ind]]
ll = None if ll in added_labels else ll
added_labels.add( ll )
fc = labels2colors[samples2labels[samples[ind]]]
rect = Rectangle( [float(i)*step, 0.0], step, height,
facecolor = fc,
label = ll,
edgecolor='b', lw = 0.0)
labels.add( ll )
ax1.add_patch(rect)
ax1.autoscale_view()
if legendon:
ax1.legend( loc = 2, ncol = legend_ncol, bbox_to_anchor=(1.01, 3.),
borderpad = 0.0, labelspacing = 0.0,
handlelength = 0.5, handletextpad = 0.3,
borderaxespad = 0.0, columnspacing = 0.3,
prop = {'size':fontsize}, frameon = False)
def samples_dend_panel( fig, Z, Z2, ystart, ylen, lw ):
ax2 = fig.add_axes([0.0,1.0+ystart,1.0,ylen], frameon=False)
Z2['color_list'] = [c.replace('b','k') for c in Z2['color_list']]
mh = max(Z[:,2])
sch._plot_dendrogram( Z2['icoord'], Z2['dcoord'], Z2['ivl'],
Z.shape[0] + 1, Z.shape[0] + 1,
mh, 'top', no_labels=True,
color_list=Z2['color_list'] )
for coll in ax2.collections:
coll._linewidths = (lw,)
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_xticklabels([])
def features_dend_panel( fig, Z, Z2, width, lw ):
ax1 = fig.add_axes([-width,0.0,width,1.0], frameon=False)
Z2['color_list'] = [c.replace('b','k').replace('x','b') for c in Z2['color_list']]
mh = max(Z[:,2])
sch._plot_dendrogram(Z2['icoord'], Z2['dcoord'], Z2['ivl'], Z.shape[0] + 1, Z.shape[0] + 1, mh, 'right', no_labels=True, color_list=Z2['color_list'])
for coll in ax1.collections:
coll._linewidths = (lw,)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_xticklabels([])
def add_cmap( cmapdict, name ):
my_cmap = matplotlib.colors.LinearSegmentedColormap(name,cmapdict,256)
pylab.register_cmap(name=name,cmap=my_cmap)
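# Example (illustrative helper, not called at import time): registering the
# user-defined colormaps declared at the top of the file makes them available
# by name to pylab.get_cmap(), just like the built-in matplotlib colormaps.
def _register_custom_cmaps():
    for name, cdict in my_colormaps:
        add_cmap(cdict, name)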
def init_fig(xsize,ysize,ncol):
fig = pylab.figure(figsize=(xsize,ysize))
sch._link_line_colors = colors[:ncol]
return fig
def heatmap_panel( fig, D, minv, maxv, idx1, idx2, cm_name, scale, cols, rows, label_font_size, cb_offset, cb_l, flabelson, slabelson, cm_ticks, gridon, bar_offset ):
cm = pylab.get_cmap(cm_name)
bottom_col = [ cm._segmentdata['red'][0][1],
cm._segmentdata['green'][0][1],
cm._segmentdata['blue'][0][1] ]
axmatrix = fig.add_axes( [0.0,0.0,1.0,1.0],
axisbg=bottom_col)
if any([c < 0.95 for c in bottom_col]):
axmatrix.spines['right'].set_color('none')
axmatrix.spines['left'].set_color('none')
axmatrix.spines['top'].set_color('none')
axmatrix.spines['bottom'].set_color('none')
norm_f = matplotlib.colors.LogNorm if scale == 'log' else matplotlib.colors.Normalize
im = axmatrix.matshow( D, norm = norm_f( vmin=minv if minv > 0.0 else None,
vmax=maxv),
aspect='auto', origin='lower', cmap=cm, vmax=maxv)
axmatrix2 = axmatrix.twinx()
axmatrix3 = axmatrix.twiny()
axmatrix.set_xticks([])
axmatrix2.set_xticks([])
axmatrix3.set_xticks([])
axmatrix.set_yticks([])
axmatrix2.set_yticks([])
axmatrix3.set_yticks([])
axmatrix.set_xticklabels([])
axmatrix2.set_xticklabels([])
axmatrix3.set_xticklabels([])
axmatrix.set_yticklabels([])
axmatrix2.set_yticklabels([])
axmatrix3.set_yticklabels([])
if any([c < 0.95 for c in bottom_col]):
axmatrix2.spines['right'].set_color('none')
axmatrix2.spines['left'].set_color('none')
axmatrix2.spines['top'].set_color('none')
axmatrix2.spines['bottom'].set_color('none')
if any([c < 0.95 for c in bottom_col]):
axmatrix3.spines['right'].set_color('none')
axmatrix3.spines['left'].set_color('none')
axmatrix3.spines['top'].set_color('none')
axmatrix3.spines['bottom'].set_color('none')
if flabelson:
axmatrix2.set_yticks(np.arange(len(rows))+0.5)
axmatrix2.set_yticklabels([rows[r] for r in idx1],size=label_font_size,va='center')
if slabelson:
axmatrix.set_xticks(np.arange(len(cols)))
axmatrix.set_xticklabels([cols[r] for r in idx2],size=label_font_size,rotation=90,va='top',ha='center')
axmatrix.tick_params(length=0)
axmatrix2.tick_params(length=0)
axmatrix3.tick_params(length=0)
axmatrix2.set_ylim(0,len(rows))
if gridon:
axmatrix.set_yticks(np.arange(len(idx1)-1)+0.5)
axmatrix.set_xticks(np.arange(len(idx2))+0.5)
axmatrix.grid( True )
ticklines = axmatrix.get_xticklines()
ticklines.extend( axmatrix.get_yticklines() )
#gridlines = axmatrix.get_xgridlines()
#gridlines.extend( axmatrix.get_ygridlines() )
for line in ticklines:
line.set_linewidth(3)
if cb_l > 0.0:
axcolor = fig.add_axes([0.0,1.0+bar_offset*1.25,1.0,cb_l])
cbar = fig.colorbar(im, cax=axcolor, orientation='horizontal')
cbar.ax.tick_params(labelsize=label_font_size)
if cm_ticks:
cbar.ax.set_xticklabels( cm_ticks.split(":") )
def read_table( fin, xstart,xstop,ystart,ystop, percentile = None, top = None, tax_lev = 's' ):
mat = [l.strip().split('\t') for l in open( fin ) if l.strip()]
    if tax_lev != 'a':
        # Keep the header row plus rows at the requested taxonomic level
        # (or unclassified clades collapsed at that level).
        lev_idx = tax_units.index(tax_lev)
        mat = [m for row_idx, m in enumerate(mat)
               if row_idx == 0
               or m[0].split('|')[-1][0] == tax_lev
               or (len(m[0].split('|')) == lev_idx and m[0].split('|')[-1].endswith("unclassified"))]
sample_labels = mat[0][xstart:xstop]
m = [(mm[xstart-1],np.array([float(f) for f in mm[xstart:xstop]])) for mm in mat[ystart:ystop]]
if top and not percentile:
percentile = 90
if percentile:
m = sorted(m,key=lambda x:-stats.scoreatpercentile(x[1],percentile))
if top:
feat_labels = [mm[0].split("|")[-1] for mm in m[:top]]
m = [mm[1] for mm in m[:top]]
else:
feat_labels = [mm[0].split("|")[-1] for mm in m]
m = [mm[1] for mm in m]
D = np.matrix( np.array( m ) )
return D, feat_labels, sample_labels
def read_dm( fin, n ):
    # Read a precomputed distance matrix for n observations and return it in
    # condensed (flat upper-triangular) form, as expected by scipy's linkage().
    mat = [[float(f) for f in l.strip().split('\t')] for l in open( fin )]
    nc = sum([len(r) for r in mat])
    if nc == n*n:
        # Full square matrix: keep only the strict upper triangle, row by row.
        dm = []
        for i in range(n):
            dm += mat[i][i+1:]
        return np.array(dm)
    if nc == (n*n-n)/2:
        # Already in triangular form: simply flatten the rows.
        dm = []
        for i in range(n):
            dm += mat[i]
        return np.array(dm)
    sys.stderr.write( "Error in reading the distance matrix\n" )
    sys.exit()
def hclust( fin, fout,
method = "average",
dist_func = "euclidean",
feat_dist_func = "d",
xcw = 0.1,
ycw = 0.1,
scale = 'lin',
minv = 0.0,
maxv = None,
xstart = 1,
ystart = 1,
xstop = None,
ystop = None,
percentile = None,
top = None,
cm_name = 'jet',
s2l = None,
label_font_size = 7,
feat_dend_col_th = None,
sample_dend_col_th = None,
clust_ncols = 7,
clust_line_w = 1.0,
label_cols = None,
sdend_h = 0.1,
fdend_w = 0.1,
cm_h = 0.03,
dmf = None,
dms = None,
legendon = False,
label2cols = None,
flabelon = True,
slabelon = True,
cm_ticks = None,
legend_ncol = 3,
pad_inches = None,
legend_font_size = 7,
gridon = 0,
tax_lev = 's'):
if label_cols and label_cols.count("-"):
label_cols = label_cols.split("-")
for n,c in my_colormaps:
add_cmap( c, n )
if feat_dist_func == 'd':
feat_dist_func = dist_func
D, feat_labels, sample_labels = read_table(fin,xstart,xstop,ystart,ystop,percentile,top,tax_lev=tax_lev)
ylen,xlen = D[:].shape
Dt = D.transpose()
size_cx, size_cy = xcw, ycw
xsize, ysize = max(xlen*size_cx,2.0), max(ylen*size_cy,2.0)
ydend_offset = 0.025*8.0/ysize if s2l else 0.0
fig = init_fig(xsize,ysize,clust_ncols)
nfeats, nsamples = len(D), len(Dt)
if dmf:
p1 = read_dm( dmf, nfeats )
Y1 = sch.linkage( p1, method=method )
else:
if len(D) < 2 or len(Dt) < 2:
Y1 = []
elif feat_dist_func == 'correlation':
Y1 = sch.linkage( D, method=method, metric=lambda x,y:max(0.0,scipy.spatial.distance.correlation(x,y)) )
else:
Y1 = sch.linkage( D, method=method, metric=feat_dist_func )
if len(Y1):
Z1 = sch.dendrogram(Y1, no_plot=True, color_threshold=feat_dend_col_th)
idx1 = Z1['leaves']
else:
idx1 = list(range(len(D)))
if dms:
p2 = read_dm( dms, nsamples )
Y2 = sch.linkage( p2, method=method )
else:
if len(Dt) < 2 or len(D) < 2:
Y2 = []
        elif dist_func == 'correlation':
Y2 = sch.linkage( Dt, method=method, metric=lambda x,y:max(0.0,scipy.spatial.distance.correlation(x,y)) )
else:
Y2 = sch.linkage( Dt, method=method, metric=dist_func )
if len(Y2):
Z2 = sch.dendrogram(Y2, no_plot=True, color_threshold=sample_dend_col_th)
idx2 = Z2['leaves']
else:
idx2 = list(range(len(Dt)))
D = D[idx1,:][:,idx2]
if fdend_w > 0.0 and len(Y1):
features_dend_panel(fig, Y1, Z1, fdend_w*8.0/xsize, clust_line_w )
if sdend_h > 0.0 and len(Y2):
samples_dend_panel(fig, Y2, Z2, ydend_offset, sdend_h*8.0/ysize, clust_line_w)
if s2l:
samples2classes_panel( fig, sample_labels, s2l, idx1, idx2, 0.025*8.0/ysize, xsize, label_cols, legendon, legend_font_size, label2cols, legend_ncol )
heatmap_panel( fig, D, minv, maxv, idx1, idx2, cm_name, scale, sample_labels, feat_labels, label_font_size, -cm_h*8.0/ysize, cm_h*0.8*8.0/ysize, flabelon, slabelon, cm_ticks, gridon, ydend_offset+sdend_h*8.0/ysize )
    if fout:
        fig.savefig( fout, bbox_inches='tight',
                     pad_inches = pad_inches,
                     dpi=300)
    else:
        pylab.show()
if __name__ == '__main__':
pars = read_params( sys.argv )
hclust( fin = pars['in'],
fout = pars['out'],
method = pars['m'],
dist_func = pars['d'],
feat_dist_func = pars['f'],
xcw = pars['x'],
ycw = pars['y'],
scale = pars['s'],
minv = pars['minv'],
maxv = pars['maxv'],
percentile = pars['perc'],
top = pars['top'],
cm_name = pars['c'],
label_font_size = pars['font_size'],
clust_line_w = pars['clust_line_w'],
sdend_h = pars['sdend_h'],
fdend_w = pars['fdend_w'],
cm_h = pars['cm_h'],
cm_ticks = pars['cm_ticks'],
pad_inches = 0.1,
tax_lev = pars['tax_lev']
)
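# Illustrative usage sketch (not part of the original script): hclust() can
# also be driven directly from Python instead of via the command line. The
# input file name below is a hypothetical tab-separated abundance table with
# features on rows and samples on columns.
#
#     hclust( fin = 'abundance.txt',
#             fout = 'heatmap.png',
#             method = 'average',
#             dist_func = 'braycurtis',
#             top = 25,
#             cm_name = 'jet',
#             sdend_h = 0.1,
#             fdend_w = 0.1 )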
|
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import sys
from atom.api import Atom, Float, Int, Str, Typed, Value, set_default
from enaml.qt.QtCore import Qt, QRect, QPoint
from enaml.qt.QtGui import QFrame, QImage, QPainter
# Make sure the resources get registered.
from . import dock_resources
class QGuideRose(QFrame):
""" A custom QFrame which implements a collection of docking guides.
This widget must always be used as an independent top-level window.
The dock area which uses the rose should manually set the geometry
of the widget before showing it.
"""
class Guide(object):
""" An enum class for identifying guide locations.
"""
#: No relevant guide.
NoGuide = 0
#: The north border guide.
BorderNorth = 1
#: The east border guide.
BorderEast = 2
#: The south border guide.
BorderSouth = 3
#: The west border guide.
BorderWest = 4
#: The north compass guide.
CompassNorth = 5
#: The east compass guide.
CompassEast = 6
#: The south compass guide.
CompassSouth = 7
#: The west compass guide.
CompassWest = 8
#: The center compass guide.
CompassCenter = 9
#: The extended compass north guide.
CompassExNorth = 10
#: The extended compass east guide.
CompassExEast = 11
#: The extended compass south guide.
CompassExSouth = 12
#: The extended compass west guide.
CompassExWest = 13
#: The vertical split guide.
SplitVertical = 14
#: The horizontal split guide.
SplitHorizontal = 15
#: The area center guide.
AreaCenter = 16
#: The extended border north guide.
BorderExNorth = 17
#: The extended border east guide.
BorderExEast = 18
#: The extended border south guide.
BorderExSouth = 19
#: The extended border west guide.
BorderExWest = 20
class Mode(object):
""" An enum class for defining the mode for the guide rose.
A mode is an or'd combination of flags which dictate which parts
of the guide rose are active on the screen. The modes related to
the centerpiece should be considered mutually exclusive.
"""
#: Nothing will be shown.
NoMode = 0x0
#: Show the border guides.
Border = 0x1
#: Show the standard compass as the centerpiece.
Compass = 0x2
#: Show the extended compass as the centerpiece.
CompassEx = 0x4
#: Show the horizontal split guide as the centerpiece.
SplitHorizontal = 0x8
#: Show the vertical split guide as the centerpiece.
SplitVertical = 0x10
#: Show the vertical area center as the centerpiece.
AreaCenter = 0x20
def __init__(self):
""" Initialize a QGuideRose.
"""
super(QGuideRose, self).__init__()
# On Mac, setting the translucent background does not cause the
# frame shadow to be hidden; it must be explicitly hidden. Mac
# also requires the window to be a tooltip in order to be raised
# above the rubber band in the Z-order. On Windows, the tooltip
# leaves a dropshadow on Qt >= 4.8 whereas tool does not.
if sys.platform == 'darwin':
self.setAttribute(Qt.WA_MacNoShadow, True)
flags = Qt.ToolTip
else:
flags = Qt.Tool
self.setAttribute(Qt.WA_TranslucentBackground, True)
flags |= Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint
self.setWindowFlags(flags)
self._mode = self.Mode.NoMode
self._center_point = QPoint()
self._border_guide = BorderGuide()
self._compass_guide = CompassGuide()
self._compass_ex_guide = CompassExGuide()
self._vsplit_guide = SplitVerticalGuide()
self._hsplit_guide = SplitHorizontalGuide()
self._area_guide = AreaCenterGuide()
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _layoutGuides(self):
""" Layout the guides based on the current widget geometry.
"""
self._border_guide.layout(self.rect())
self._compass_guide.layout(self._center_point)
self._compass_ex_guide.layout(self._center_point)
self._vsplit_guide.layout(self._center_point)
self._hsplit_guide.layout(self._center_point)
self._area_guide.layout(self._center_point)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def centerPoint(self):
""" Get the center point of the guide rose.
Returns
-------
result : QPoint
The location that will be used as the center of the portion
of the rose with a configurable location.
"""
return self._center_point
def setCenterPoint(self, pos):
""" Set the center point of the guide rose.
Parameters
----------
pos : QPoint
The location that will be used as the center of the portion
of the rose with a configurable location.
"""
if pos != self._center_point:
self._center_point = pos
self._layoutGuides()
self.update()
def mode(self):
""" Get the mode of the guide rose.
Returns
-------
result : GuideMode
The guide mode applied to the guide rose.
"""
return self._mode
def setMode(self, mode):
""" Set the mode of the guide rose.
Parameters
----------
mode : GuideMode
An or'd combination of mode flags for the guide rose.
"""
if mode != self._mode:
self._mode = mode
self.update()
def mouseOver(self, pos):
""" Update the guide pads based on the mouse position.
        The current mode of the guide rose is used to determine which
        of the guide pads should be updated.
Parameters
----------
pos : QPoint
The position of the mouse expressed in local coordinates.
"""
self._border_guide.mouse_over(pos)
self._compass_guide.mouse_over(pos)
self._compass_ex_guide.mouse_over(pos)
self._vsplit_guide.mouse_over(pos)
self._hsplit_guide.mouse_over(pos)
self._area_guide.mouse_over(pos)
self.update()
def guideAt(self, pos, mode=None):
""" Get the guide which lies underneath a given position.
Parameters
----------
pos : QPoint
            The position of interest, expressed in local coordinates.
mode : QGuideRose.Mode, optional
The mode to use for hit testing. If not provided, the
current mode for the guide rose is used.
Returns
-------
result : QGuideRose.Guide
The enum value for the guide under the mouse position.
"""
Guide = self.Guide
Mode = self.Mode
mode = mode if mode is not None else self._mode
if mode & Mode.Border:
g = self._border_guide.guide_at(pos)
if g != Guide.NoGuide:
return g
if mode & Mode.Compass:
g = self._compass_guide.guide_at(pos)
if g != Guide.NoGuide:
return g
elif mode & Mode.CompassEx:
g = self._compass_ex_guide.guide_at(pos)
if g != Guide.NoGuide:
return g
elif mode & Mode.SplitHorizontal:
g = self._hsplit_guide.guide_at(pos)
if g != Guide.NoGuide:
return g
elif mode & Mode.SplitVertical:
g = self._vsplit_guide.guide_at(pos)
if g != Guide.NoGuide:
return g
elif mode & Mode.AreaCenter:
g = self._area_guide.guide_at(pos)
if g != Guide.NoGuide:
return g
return Guide.NoGuide
#--------------------------------------------------------------------------
# Reimplementations
#--------------------------------------------------------------------------
def resizeEvent(self, event):
""" Handle the resize event for the rose.
This handler will relayout the guides on a resize.
"""
self._layoutGuides()
def paintEvent(self, event):
""" Handle the paint event for the rose.
This handler will redraw all of the guides for the rose.
"""
super(QGuideRose, self).paintEvent(event)
painter = QPainter(self)
Mode = self.Mode
mode = self._mode
if mode & Mode.Border:
self._border_guide.paint(painter)
if mode & Mode.Compass:
self._compass_guide.paint(painter)
elif mode & Mode.CompassEx:
self._compass_ex_guide.paint(painter)
elif mode & Mode.SplitHorizontal:
self._hsplit_guide.paint(painter)
elif mode & Mode.SplitVertical:
self._vsplit_guide.paint(painter)
elif mode & Mode.AreaCenter:
self._area_guide.paint(painter)
class GuideImage(Atom):
""" A class which manages the painting of a guide image.
"""
#: The default alpha value for guide transparency.
TRANSPARENT = 0.60
#: The default alpha value for no guide transparency.
OPAQUE = 1.0
#: The QImage to use when painting the guide.
image = Typed(QImage, factory=lambda: QImage())
#: The QRect specifying where to draw the image.
rect = Typed(QRect, factory=lambda: QRect())
#: The opacity to use when drawing the image.
opacity = Float(TRANSPARENT)
#: A cache of QImage instances for the loaded guide images.
_images = {}
@classmethod
def load_image(cls, name):
""" Load the guide image for the given name into a QImage.
        This function is hard-coded to return the named .png image from
        the ':dock_images' Qt resource path registered by the dock_resources
        module. It is not a generic image loading routine.
"""
image = cls._images.get(name)
if image is None:
image = QImage(':dock_images/%s.png' % name)
cls._images[name] = image
return image
def __init__(self, name):
""" Initialize a GuideImage.
Parameters
----------
name : string
The name of the image to load for the guide.
"""
self.image = self.load_image(name)
def opacify(self):
""" Make the guide image opaque.
"""
self.opacity = self.OPAQUE
def transparentize(self):
""" Make the guide image transparent.
"""
self.opacity = self.TRANSPARENT
def contains(self, point):
""" Test whether the image contains a point.
Parameters
----------
        point : QPoint
            The point to test for containment.
Returns
-------
result : bool
True if the image contains the point, False otherwise.
"""
return self.rect.contains(point)
def paint(self, painter):
""" Paint the image using the given painter.
Parameters
----------
painter : QPainter
An active QPainter to use for drawing the image. If the
image is a null image, painting will be skipped.
"""
image = self.image
if image.isNull():
return
painter.save()
painter.setOpacity(self.opacity)
painter.drawImage(self.rect, image)
painter.restore()
class GuideHandler(Atom):
""" A base class for defining guide handlers.
"""
#: The last guide hit during a mouseover.
_last_guide = Typed(GuideImage)
def iterguides(self):
""" Iterate the guides managed by this handler.
Returns
-------
result : iterable
An iterable of (Guide, GuideImage) pairs which are the
guides managed by the handler.
"""
raise NotImplementedError
def iterboxes(self):
""" Iterate the boxes which lie under the guides.
Returns
-------
result : iterable
An iterable of GuideImage instances which are the boxes
to be painted under the guides.
"""
raise NotImplementedError
def guide_at(self, pos):
""" Get the guide under the given mouse position.
Parameters
----------
pos : QPoint
The point of interest, expressed in layout coordinates.
Returns
-------
result : Guide
The enum value for the guide at the given position.
"""
for enum, guide in self.iterguides():
if guide.contains(pos):
return enum
return QGuideRose.Guide.NoGuide
def mouse_over(self, pos):
""" Perform a mouse over of the guides.
Parameters
----------
pos : QPoint
The position of interest expressed in layout coordinates.
"""
for ignored, guide in self.iterguides():
if guide.contains(pos):
last = self._last_guide
if last is not None and last is not guide:
last.transparentize()
guide.opacify()
self._last_guide = guide
break
else:
if self._last_guide is not None:
self._last_guide.transparentize()
def paint(self, painter):
""" Paint the guides using the supplied painter.
Parameters
----------
painter : QPainter
The painter to use to paint the guides.
"""
for box in self.iterboxes():
box.paint(painter)
for ignored, guide in self.iterguides():
guide.paint(painter)
class BorderGuide(GuideHandler):
""" A guide handler which manages the border guide.
"""
_guides = Value(factory=lambda: {
QGuideRose.Guide.BorderNorth: GuideImage('thin_horizontal'),
QGuideRose.Guide.BorderExNorth: GuideImage('bar_horizontal'),
QGuideRose.Guide.BorderEast: GuideImage('thin_vertical'),
QGuideRose.Guide.BorderExEast: GuideImage('bar_vertical'),
QGuideRose.Guide.BorderSouth: GuideImage('thin_horizontal'),
QGuideRose.Guide.BorderExSouth: GuideImage('bar_horizontal'),
QGuideRose.Guide.BorderWest: GuideImage('thin_vertical'),
QGuideRose.Guide.BorderExWest: GuideImage('bar_vertical'),
})
_boxes = Value(factory=lambda: {
QGuideRose.Guide.BorderNorth: GuideImage('guide_box'),
QGuideRose.Guide.BorderEast: GuideImage('guide_box'),
QGuideRose.Guide.BorderSouth: GuideImage('guide_box'),
QGuideRose.Guide.BorderWest: GuideImage('guide_box'),
})
def iterguides(self):
""" Iterate the guides managed by the handler.
Returns
-------
result : iterable
An iterable of (Guide, GuideImage) pairs which are the
guides managed by the handler.
"""
return self._guides.iteritems()
def iterboxes(self):
""" Iterate the boxes which lie under the guides.
Returns
-------
result : iterable
An iterable of GuideImage instances which are the boxes
to be painted under the guides.
"""
return self._boxes.itervalues()
def layout(self, rect):
""" Layout the guides for the given rect.
Parameters
----------
rect : QRect
The rectangle in which to layout the border guides.
"""
boxes = self._boxes
guides = self._guides
w = rect.width()
h = rect.height()
cx = rect.left() + w / 2
cy = rect.top() + h / 2
Guide = QGuideRose.Guide
guides[Guide.BorderNorth].rect = QRect(cx - 15, 27, 31, 19)
guides[Guide.BorderExNorth].rect = QRect(cx - 15, 15, 31, 10)
boxes[Guide.BorderNorth].rect = QRect(cx - 20, 10, 41, 41)
guides[Guide.BorderEast].rect = QRect(w - 45, cy - 15, 19, 31)
guides[Guide.BorderExEast].rect = QRect(w - 24, cy - 15, 10, 31)
boxes[Guide.BorderEast].rect = QRect(w - 50, cy - 20, 41, 41)
guides[Guide.BorderSouth].rect = QRect(cx - 15, h - 45, 31, 19)
guides[Guide.BorderExSouth].rect = QRect(cx - 15, h - 24, 31, 10)
boxes[Guide.BorderSouth].rect = QRect(cx - 20, h - 50, 41, 41)
guides[Guide.BorderWest].rect = QRect(27, cy - 15, 19, 31)
guides[Guide.BorderExWest].rect = QRect(15, cy - 15, 10, 31)
boxes[Guide.BorderWest].rect = QRect(10, cy - 20, 41, 41)
class CompassGuide(GuideHandler):
""" A guide handler which manages the standard compass guide.
"""
_guides = Value(factory=lambda: {
QGuideRose.Guide.CompassNorth: GuideImage('arrow_north'),
QGuideRose.Guide.CompassEast: GuideImage('arrow_east'),
QGuideRose.Guide.CompassSouth: GuideImage('arrow_south'),
QGuideRose.Guide.CompassWest: GuideImage('arrow_west'),
QGuideRose.Guide.CompassCenter: GuideImage('center'),
})
_box = Value(factory=lambda: GuideImage('cross_box'))
def iterguides(self):
""" Iterate the guides for the compass.
Returns
-------
result : generator
A generator which yields 2-tuples of (enum, guide) for
the relevant guides in the compass.
"""
return self._guides.iteritems()
def iterboxes(self):
""" Iterate the boxes which lie under the guides.
Returns
-------
result : iterable
An iterable of GuideImage instances which are the boxes
to be painted under the guides.
"""
yield self._box
def layout(self, pos):
""" Layout the guides for the given position.
Parameters
----------
pos : QPoint
The center point of the compass.
"""
x = pos.x()
y = pos.y()
Guide = QGuideRose.Guide
guides = self._guides
guides[Guide.CompassNorth].rect = QRect(x - 15, y - 50, 31, 31)
guides[Guide.CompassEast].rect = QRect(x + 20, y - 15, 31, 31)
guides[Guide.CompassSouth].rect = QRect(x - 15, y + 20, 31, 31)
guides[Guide.CompassWest].rect = QRect(x - 50, y - 15, 31, 31)
guides[Guide.CompassCenter].rect = QRect(x - 15, y - 15, 31, 31)
self._box.rect = QRect(x - 55, y - 55, 111, 111)
class CompassExGuide(GuideHandler):
""" A class which renders the extended compass guide.
"""
_guides = Value(factory=lambda: {
QGuideRose.Guide.CompassNorth: GuideImage('arrow_north'),
QGuideRose.Guide.CompassEast: GuideImage('arrow_east'),
QGuideRose.Guide.CompassSouth: GuideImage('arrow_south'),
QGuideRose.Guide.CompassWest: GuideImage('arrow_west'),
QGuideRose.Guide.CompassCenter: GuideImage('center'),
QGuideRose.Guide.CompassExNorth: GuideImage('bar_horizontal'),
QGuideRose.Guide.CompassExEast: GuideImage('bar_vertical'),
QGuideRose.Guide.CompassExSouth: GuideImage('bar_horizontal'),
QGuideRose.Guide.CompassExWest: GuideImage('bar_vertical'),
})
_box = Value(factory=lambda: GuideImage('cross_ex_box'))
def iterguides(self):
""" Iterate the guides for the extented compass.
Returns
-------
result : generator
A generator which yields 2-tuples of (enum, guide) for
the relevant guides in the compass.
"""
return self._guides.iteritems()
def iterboxes(self):
""" Iterate the boxes which lie under the guides.
Returns
-------
result : iterable
An iterable of GuideImage instances which are the boxes
to be painted under the guides.
"""
yield self._box
def layout(self, pos):
""" Layout the guides for the extended compass.
Parameters
----------
pos : QPoint
The center point of the compass.
"""
x = pos.x()
y = pos.y()
Guide = QGuideRose.Guide
guides = self._guides
guides[Guide.CompassNorth].rect = QRect(x - 15, y - 64, 31, 31)
guides[Guide.CompassEast].rect = QRect(x + 34, y - 15, 31, 31)
guides[Guide.CompassSouth].rect = QRect(x - 15, y + 34, 31, 31)
guides[Guide.CompassWest].rect = QRect(x - 64, y - 15, 31, 31)
guides[Guide.CompassCenter].rect = QRect(x - 15, y - 15, 31, 31)
guides[Guide.CompassExNorth].rect = QRect(x - 15, y - 29, 31, 10)
guides[Guide.CompassExEast].rect = QRect(x + 20, y - 15, 10, 31)
guides[Guide.CompassExSouth].rect = QRect(x - 15, y + 20, 31, 10)
guides[Guide.CompassExWest].rect = QRect(x - 29, y - 15, 10, 31)
self._box.rect = QRect(x - 69, y - 69, 139, 139)
class SingleGuide(GuideHandler):
""" A base class for defining a single guide.
"""
guide_enum = Int(QGuideRose.Guide.NoGuide)
image_name = Str('')
_box = Value(factory=lambda: GuideImage('guide_box'))
_guide = Typed(GuideImage)
def _default__guide(self):
""" The default value handler for the '_guide' attribute.
"""
return GuideImage(self.image_name)
def iterguides(self):
""" Iterate the guides for the compass.
Returns
-------
result : generator
A generator which yields 2-tuples of (enum, guide) for
the relevant guides in the compass.
"""
yield (self.guide_enum, self._guide)
def iterboxes(self):
""" Iterate the boxes which lie under the guides.
Returns
-------
result : iterable
An iterable of GuideImage instances which are the boxes
to be painted under the guides.
"""
yield self._box
def layout(self, pos):
""" Layout the guides for the given position.
Parameters
----------
pos : QPoint
The center point of the guide.
"""
x = pos.x()
y = pos.y()
self._guide.rect = QRect(x - 15, y - 15, 31, 31)
self._box.rect = QRect(x - 20, y - 20, 41, 41)
class SplitHorizontalGuide(SingleGuide):
""" A single guide which uses the horizontal split image.
"""
guide_enum = set_default(QGuideRose.Guide.SplitHorizontal)
image_name = set_default('split_horizontal')
class SplitVerticalGuide(SingleGuide):
""" A single guide which uses the vertical split image.
"""
guide_enum = set_default(QGuideRose.Guide.SplitVertical)
image_name = set_default('split_vertical')
class AreaCenterGuide(SingleGuide):
""" A single guide which uses the area center image.
"""
guide_enum = set_default(QGuideRose.Guide.AreaCenter)
image_name = set_default('center')
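# Illustrative sketch (not part of the original module): how a dock area might
# drive the rose during a drag operation. The `area` and `global_pos` names are
# hypothetical stand-ins, and a QApplication must already exist before the
# rose is instantiated.
#
#     rose = QGuideRose()
#     rose.setGeometry(area.frameGeometry())
#     rose.setMode(QGuideRose.Mode.Border | QGuideRose.Mode.Compass)
#     rose.setCenterPoint(QPoint(area.width() / 2, area.height() / 2))
#     rose.show()
#     local = rose.mapFromGlobal(global_pos)
#     rose.mouseOver(local)
#     guide = rose.guideAt(local)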
|
|
"""Wireup
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import atexit
import fnmatch
import os
import urlparse
from tempfile import mkstemp
import aspen
from aspen.testing.client import Client
from babel.core import Locale
from babel.messages.pofile import read_po
from babel.numbers import parse_pattern
import balanced
import braintree
import gratipay
import gratipay.billing.payday
import raven
import mandrill
from environment import Environment, is_yesish
from gratipay.elsewhere import PlatformRegistry
from gratipay.elsewhere.bitbucket import Bitbucket
from gratipay.elsewhere.bountysource import Bountysource
from gratipay.elsewhere.github import GitHub
from gratipay.elsewhere.facebook import Facebook
from gratipay.elsewhere.google import Google
from gratipay.elsewhere.openstreetmap import OpenStreetMap
from gratipay.elsewhere.twitter import Twitter
from gratipay.elsewhere.venmo import Venmo
from gratipay.models.account_elsewhere import AccountElsewhere
from gratipay.models.community import Community
from gratipay.models.exchange_route import ExchangeRoute
from gratipay.models.participant import Participant
from gratipay.models.team import Team
from gratipay.models import GratipayDB
from gratipay.utils.emails import compile_email_spt
from gratipay.utils.http_caching import asset_etag
from gratipay.utils.i18n import (
ALIASES, ALIASES_R, COUNTRIES, LANGUAGES_2, LOCALES,
get_function_from_rule, make_sorted_dict
)
def base_url(website, env):
gratipay.base_url = website.base_url = env.base_url
def secure_cookies(env):
gratipay.use_secure_cookies = env.base_url.startswith('https')
def db(env):
dburl = env.database_url
maxconn = env.database_maxconn
db = GratipayDB(dburl, maxconn=maxconn)
for model in (AccountElsewhere, Community, ExchangeRoute, Participant, Team):
db.register_model(model)
gratipay.billing.payday.Payday.db = db
return db
def mail(env, project_root='.'):
Participant._mailer = mandrill.Mandrill(env.mandrill_key)
emails = {}
emails_dir = project_root+'/emails/'
i = len(emails_dir)
for spt in find_files(emails_dir, '*.spt'):
base_name = spt[i:-4]
emails[base_name] = compile_email_spt(spt)
Participant._emails = emails
def billing(env):
balanced.configure(env.balanced_api_secret)
if env.braintree_sandbox_mode:
braintree_env = braintree.Environment.Sandbox
else:
braintree_env = braintree.Environment.Production
braintree.Configuration.configure(
braintree_env,
env.braintree_merchant_id,
env.braintree_public_key,
env.braintree_private_key
)
def username_restrictions(website):
gratipay.RESTRICTED_USERNAMES = os.listdir(website.www_root)
def make_sentry_teller(env):
if not env.sentry_dsn:
aspen.log_dammit("Won't log to Sentry (SENTRY_DSN is empty).")
def noop(*a, **kw):
pass
Participant._tell_sentry = noop
return noop
sentry = raven.Client(env.sentry_dsn)
def tell_sentry(exception, state):
# Decide if we care.
# ==================
if isinstance(exception, aspen.Response):
if exception.code < 500:
# Only log server errors to Sentry. For responses < 500 we use
# stream-/line-based access logging. See discussion on:
# https://github.com/gratipay/gratipay.com/pull/1560.
return
# Find a user.
# ============
# | is disallowed in usernames, so we can use it here to indicate
# situations in which we can't get a username.
user = state.get('user')
user_id = 'n/a'
if user is None:
username = '| no user'
else:
is_anon = getattr(user, 'ANON', None)
if is_anon is None:
username = '| no ANON'
elif is_anon:
username = '| anonymous'
else:
participant = getattr(user, 'participant', None)
if participant is None:
username = '| no participant'
else:
username = getattr(user.participant, 'username', None)
if username is None:
username = '| no username'
else:
user_id = user.participant.id
username = username.encode('utf8')
user = { 'id': user_id
, 'is_admin': user.participant.is_admin
, 'is_suspicious': user.participant.is_suspicious
, 'claimed_time': user.participant.claimed_time.isoformat()
, 'url': 'https://gratipay.com/{}/'.format(username)
}
# Fire off a Sentry call.
# =======================
dispatch_result = state.get('dispatch_result')
request = state.get('request')
tags = { 'username': username
, 'user_id': user_id
}
extra = { 'filepath': getattr(dispatch_result, 'match', None)
, 'request': str(request).splitlines()
, 'user': user
}
result = sentry.captureException(tags=tags, extra=extra)
# Emit a reference string to stdout.
# ==================================
ident = sentry.get_ident(result)
aspen.log_dammit('Exception reference: ' + ident)
Participant._tell_sentry = tell_sentry
return tell_sentry
class BadEnvironment(SystemExit):
pass
def accounts_elsewhere(website, env):
twitter = Twitter(
env.twitter_consumer_key,
env.twitter_consumer_secret,
env.twitter_callback,
)
facebook = Facebook(
env.facebook_app_id,
env.facebook_app_secret,
env.facebook_callback,
)
github = GitHub(
env.github_client_id,
env.github_client_secret,
env.github_callback,
)
google = Google(
env.google_client_id,
env.google_client_secret,
env.google_callback,
)
bitbucket = Bitbucket(
env.bitbucket_consumer_key,
env.bitbucket_consumer_secret,
env.bitbucket_callback,
)
openstreetmap = OpenStreetMap(
env.openstreetmap_consumer_key,
env.openstreetmap_consumer_secret,
env.openstreetmap_callback,
env.openstreetmap_api_url,
env.openstreetmap_auth_url,
)
bountysource = Bountysource(
None,
env.bountysource_api_secret,
env.bountysource_callback,
env.bountysource_api_host,
env.bountysource_www_host,
)
venmo = Venmo(
env.venmo_client_id,
env.venmo_client_secret,
env.venmo_callback,
)
signin_platforms = [twitter, github, facebook, google, bitbucket, openstreetmap]
website.signin_platforms = PlatformRegistry(signin_platforms)
AccountElsewhere.signin_platforms_names = tuple(p.name for p in signin_platforms)
# For displaying "Connected Accounts"
website.social_profiles = [twitter, github, facebook, google, bitbucket, openstreetmap, bountysource]
all_platforms = signin_platforms + [bountysource, venmo]
website.platforms = AccountElsewhere.platforms = PlatformRegistry(all_platforms)
friends_platforms = [p for p in website.platforms if getattr(p, 'api_friends_path', None)]
website.friends_platforms = PlatformRegistry(friends_platforms)
for platform in all_platforms:
platform.icon = website.asset('platforms/%s.16.png' % platform.name)
platform.logo = website.asset('platforms/%s.png' % platform.name)
def cryptocoin_networks(website):
website.cryptocoin_networks = [
{
'name': 'bitcoin',
'display_name': 'Bitcoin',
'logo': website.asset('cryptocoins/bitcoin.png'),
},
]
def find_files(directory, pattern):
for root, dirs, files in os.walk(directory):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(root, filename)
def compile_assets(website):
client = Client(website.www_root, website.project_root)
client._website = website
for spt in find_files(website.www_root+'/assets/', '*.spt'):
filepath = spt[:-4] # /path/to/www/assets/foo.css
urlpath = spt[spt.rfind('/assets/'):-4] # /assets/foo.css
try:
# Remove any existing compiled asset, so we can access the dynamic
# one instead (Aspen prefers foo.css over foo.css.spt).
os.unlink(filepath)
except:
pass
headers = {}
if website.base_url:
url = urlparse.urlparse(website.base_url)
headers[b'HTTP_X_FORWARDED_PROTO'] = str(url.scheme)
headers[b'HTTP_HOST'] = str(url.netloc)
content = client.GET(urlpath, **headers).body
tmpfd, tmpfpath = mkstemp(dir='.')
os.write(tmpfd, content)
os.close(tmpfd)
os.rename(tmpfpath, filepath)
atexit.register(lambda: clean_assets(website.www_root))
def clean_assets(www_root):
for spt in find_files(www_root+'/assets/', '*.spt'):
try:
os.unlink(spt[:-4])
except:
pass
def load_i18n(project_root, tell_sentry):
# Load the locales
localeDir = os.path.join(project_root, 'i18n', 'core')
locales = LOCALES
for file in os.listdir(localeDir):
try:
parts = file.split(".")
if not (len(parts) == 2 and parts[1] == "po"):
continue
lang = parts[0]
with open(os.path.join(localeDir, file)) as f:
l = locales[lang.lower()] = Locale(lang)
c = l.catalog = read_po(f)
c.plural_func = get_function_from_rule(c.plural_expr)
try:
l.countries = make_sorted_dict(COUNTRIES, l.territories)
except KeyError:
l.countries = COUNTRIES
try:
l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)
except KeyError:
l.languages_2 = LANGUAGES_2
except Exception as e:
tell_sentry(e, {})
# Add aliases
for k, v in list(locales.items()):
locales.setdefault(ALIASES.get(k, k), v)
locales.setdefault(ALIASES_R.get(k, k), v)
for k, v in list(locales.items()):
locales.setdefault(k.split('_', 1)[0], v)
# Patch the locales to look less formal
locales['fr'].currency_formats[None] = parse_pattern('#,##0.00\u202f\xa4')
locales['fr'].currency_symbols['USD'] = '$'
def other_stuff(website, env):
website.cache_static = env.gratipay_cache_static
website.compress_assets = env.gratipay_compress_assets
if website.cache_static:
def asset(path):
fspath = website.www_root+'/assets/'+path
etag = ''
try:
etag = asset_etag(fspath)
except Exception as e:
website.tell_sentry(e, {})
return env.gratipay_asset_url+path+(etag and '?etag='+etag)
website.asset = asset
compile_assets(website)
else:
website.asset = lambda path: env.gratipay_asset_url+path
clean_assets(website.www_root)
website.optimizely_id = env.optimizely_id
website.include_piwik = env.include_piwik
website.log_metrics = env.log_metrics
def env():
env = Environment(
BASE_URL = unicode,
DATABASE_URL = unicode,
DATABASE_MAXCONN = int,
GRATIPAY_ASSET_URL = unicode,
GRATIPAY_CACHE_STATIC = is_yesish,
GRATIPAY_COMPRESS_ASSETS = is_yesish,
BALANCED_API_SECRET = unicode,
BRAINTREE_SANDBOX_MODE = is_yesish,
BRAINTREE_MERCHANT_ID = unicode,
BRAINTREE_PUBLIC_KEY = unicode,
BRAINTREE_PRIVATE_KEY = unicode,
GITHUB_CLIENT_ID = unicode,
GITHUB_CLIENT_SECRET = unicode,
GITHUB_CALLBACK = unicode,
BITBUCKET_CONSUMER_KEY = unicode,
BITBUCKET_CONSUMER_SECRET = unicode,
BITBUCKET_CALLBACK = unicode,
TWITTER_CONSUMER_KEY = unicode,
TWITTER_CONSUMER_SECRET = unicode,
TWITTER_CALLBACK = unicode,
FACEBOOK_APP_ID = unicode,
FACEBOOK_APP_SECRET = unicode,
FACEBOOK_CALLBACK = unicode,
GOOGLE_CLIENT_ID = unicode,
GOOGLE_CLIENT_SECRET = unicode,
GOOGLE_CALLBACK = unicode,
BOUNTYSOURCE_API_SECRET = unicode,
BOUNTYSOURCE_CALLBACK = unicode,
BOUNTYSOURCE_API_HOST = unicode,
BOUNTYSOURCE_WWW_HOST = unicode,
VENMO_CLIENT_ID = unicode,
VENMO_CLIENT_SECRET = unicode,
VENMO_CALLBACK = unicode,
OPENSTREETMAP_CONSUMER_KEY = unicode,
OPENSTREETMAP_CONSUMER_SECRET = unicode,
OPENSTREETMAP_CALLBACK = unicode,
OPENSTREETMAP_API_URL = unicode,
OPENSTREETMAP_AUTH_URL = unicode,
UPDATE_GLOBAL_STATS_EVERY = int,
CHECK_DB_EVERY = int,
DEQUEUE_EMAILS_EVERY = int,
OPTIMIZELY_ID = unicode,
SENTRY_DSN = unicode,
LOG_METRICS = is_yesish,
INCLUDE_PIWIK = is_yesish,
MANDRILL_KEY = unicode,
RAISE_SIGNIN_NOTIFICATIONS = is_yesish,
# This is used in our Procfile. (PORT is also used but is provided by
# Heroku; we don't set it ourselves in our app config.)
GUNICORN_OPTS = unicode,
)
# Error Checking
# ==============
if env.malformed:
these = len(env.malformed) != 1 and 'these' or 'this'
plural = len(env.malformed) != 1 and 's' or ''
aspen.log_dammit("=" * 42)
aspen.log_dammit( "Oh no! Gratipay.com couldn't understand %s " % these
, "environment variable%s:" % plural
)
aspen.log_dammit(" ")
for key, err in env.malformed:
aspen.log_dammit(" {} ({})".format(key, err))
aspen.log_dammit(" ")
aspen.log_dammit("See ./default_local.env for hints.")
aspen.log_dammit("=" * 42)
keys = ', '.join([key for key in env.malformed])
raise BadEnvironment("Malformed envvar{}: {}.".format(plural, keys))
if env.missing:
these = len(env.missing) != 1 and 'these' or 'this'
plural = len(env.missing) != 1 and 's' or ''
aspen.log_dammit("=" * 42)
aspen.log_dammit( "Oh no! Gratipay.com needs %s missing " % these
, "environment variable%s:" % plural
)
aspen.log_dammit(" ")
for key in env.missing:
aspen.log_dammit(" " + key)
aspen.log_dammit(" ")
aspen.log_dammit( "(Sorry, we must've started looking for "
, "%s since you last updated Gratipay!)" % these
)
aspen.log_dammit(" ")
aspen.log_dammit("Running Gratipay locally? Edit ./local.env.")
aspen.log_dammit("Running the test suite? Edit ./tests/env.")
aspen.log_dammit(" ")
aspen.log_dammit("See ./default_local.env for hints.")
aspen.log_dammit("=" * 42)
keys = ', '.join([key for key in env.missing])
raise BadEnvironment("Missing envvar{}: {}.".format(plural, keys))
return env
if __name__ == '__main__':
env()
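# Illustrative wiring sketch (hypothetical call order; the real call site lives
# in Gratipay's website startup code, not in this module):
#
#     _env = env()
#     tell_sentry = make_sentry_teller(_env)
#     base_url(website, _env)
#     secure_cookies(_env)
#     website.db = db(_env)
#     mail(_env, project_root=website.project_root)
#     billing(_env)
#     accounts_elsewhere(website, _env)
#     load_i18n(website.project_root, tell_sentry)
#     other_stuff(website, _env)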
|
|
"""
The :mod:`source` module concerns itself with manipulating
buffers of source code: creating ranges of characters corresponding
to a token, combining these ranges, extracting human-readable
location information and original source from a range.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import bisect
class Buffer:
"""
A buffer containing source code and location information.
:ivar source: (string) source code
:ivar name: (string) input filename or another description
of the input (e.g. ``<stdin>``).
    :ivar first_line: (integer) first line of the input
"""
def __init__(self, source, name="<input>", first_line=1):
self.source = source
self.name = name
self.first_line = first_line
self._line_begins = None
def __repr__(self):
return "Buffer(\"%s\")" % self.name
def source_line(self, lineno):
"""
Returns line ``lineno`` from source, taking ``first_line`` into account,
or raises :exc:`IndexError` if ``lineno`` is out of range.
"""
line_begins = self._extract_line_begins()
lineno = lineno - self.first_line
if lineno >= 0 and lineno + 1 < len(line_begins):
first, last = line_begins[lineno:lineno + 2]
return self.source[first:last]
elif lineno >= 0 and lineno < len(line_begins):
return self.source[line_begins[-1]:]
else:
raise IndexError
def decompose_position(self, offset):
"""
Returns a ``line, column`` tuple for a character offset into the source,
        or raises :exc:`IndexError` if ``offset`` is out of range.
"""
line_begins = self._extract_line_begins()
lineno = bisect.bisect_right(line_begins, offset) - 1
if offset >= 0 and offset <= len(self.source):
return lineno + self.first_line, offset - line_begins[lineno]
else:
raise IndexError
def _extract_line_begins(self):
if self._line_begins:
return self._line_begins
self._line_begins = [0]
index = None
while True:
index = self.source.find("\n", index) + 1
if index == 0:
return self._line_begins
self._line_begins.append(index)
class Range:
"""
Location of an exclusive range of characters [*begin_pos*, *end_pos*)
in a :class:`Buffer`.
:ivar begin_pos: (integer) offset of the first character
    :ivar end_pos: (integer) offset of the character after the last
:ivar expanded_from: (Range or None) the range from which this range was expanded
"""
def __init__(self, source_buffer, begin_pos, end_pos, expanded_from=None):
self.source_buffer = source_buffer
self.begin_pos = begin_pos
self.end_pos = end_pos
self.expanded_from = expanded_from
def __repr__(self):
"""
Returns a human-readable representation of this range.
"""
return "Range(\"%s\", %d, %d, %s)" % \
(self.source_buffer.name, self.begin_pos, self.end_pos, repr(self.expanded_from))
def chain(self, expanded_from):
"""
Returns a range identical to this one, but indicating that
it was expanded from the range `expanded_from`.
"""
        return Range(self.source_buffer, self.begin_pos, self.end_pos,
expanded_from=expanded_from)
def begin(self):
"""
Returns a zero-length range located just before the beginning of this range.
"""
return Range(self.source_buffer, self.begin_pos, self.begin_pos,
expanded_from=self.expanded_from)
def end(self):
"""
Returns a zero-length range located just after the end of this range.
"""
return Range(self.source_buffer, self.end_pos, self.end_pos,
expanded_from=self.expanded_from)
def size(self):
"""
Returns the amount of characters spanned by the range.
"""
return self.end_pos - self.begin_pos
def column(self):
"""
Returns a zero-based column number of the beginning of this range.
"""
line, column = self.source_buffer.decompose_position(self.begin_pos)
return column
def column_range(self):
"""
Returns a [*begin*, *end*) tuple describing the range of columns spanned
        by this range. If the range spans more than one line, the returned
        *end* is the last column of the first line.
"""
if self.begin().line() == self.end().line():
return self.begin().column(), self.end().column()
else:
return self.begin().column(), len(self.begin().source_line()) - 1
def line(self):
"""
Returns the line number of the beginning of this range.
"""
line, column = self.source_buffer.decompose_position(self.begin_pos)
return line
def join(self, other):
"""
Returns the smallest possible range spanning both this range and other.
Raises :exc:`ValueError` if the ranges do not belong to the same
:class:`Buffer`.
"""
if self.source_buffer != other.source_buffer:
raise ValueError
if self.expanded_from == other.expanded_from:
expanded_from = self.expanded_from
else:
expanded_from = None
return Range(self.source_buffer,
min(self.begin_pos, other.begin_pos),
max(self.end_pos, other.end_pos),
expanded_from=expanded_from)
def source(self):
"""
Returns the source code covered by this range.
"""
return self.source_buffer.source[self.begin_pos:self.end_pos]
def source_line(self):
"""
Returns the line of source code containing the beginning of this range.
"""
return self.source_buffer.source_line(self.line())
def source_lines(self):
"""
Returns the lines of source code containing the entirety of this range.
"""
return [self.source_buffer.source_line(line)
for line in range(self.line(), self.end().line() + 1)]
def __str__(self):
"""
Returns a Clang-style string representation of the beginning of this range.
"""
if self.begin_pos != self.end_pos:
return "%s:%d:%d-%d:%d" % (self.source_buffer.name,
self.line(), self.column() + 1,
self.end().line(), self.end().column() + 1)
else:
return "%s:%d:%d" % (self.source_buffer.name,
self.line(), self.column() + 1)
def __eq__(self, other):
"""
Returns true if the ranges have the same source buffer, start and end position.
"""
return (type(self) == type(other) and
self.source_buffer == other.source_buffer and
self.begin_pos == other.begin_pos and
self.end_pos == other.end_pos and
self.expanded_from == other.expanded_from)
def __ne__(self, other):
"""
Inverse of :meth:`__eq__`.
"""
return not (self == other)
def __hash__(self):
return hash((self.source_buffer, self.begin_pos, self.end_pos, self.expanded_from))
class Comment:
"""
A comment in the source code.
:ivar loc: (:class:`Range`) source location
:ivar text: (string) comment text
"""
def __init__(self, loc, text):
self.loc, self.text = loc, text
class RewriterConflict(Exception):
"""
An exception that is raised when two ranges supplied to a rewriter overlap.
:ivar first: (:class:`Range`) first overlapping range
:ivar second: (:class:`Range`) second overlapping range
"""
def __init__(self, first, second):
self.first, self.second = first, second
        Exception.__init__(self, "Ranges %s and %s overlap" % (repr(first), repr(second)))
class Rewriter:
"""
The :class:`Rewriter` class rewrites source code: performs bulk modification
guided by a list of ranges and code fragments replacing their original
content.
:ivar buffer: (:class:`Buffer`) buffer
"""
def __init__(self, buffer):
self.buffer = buffer
self.ranges = []
def replace(self, range, replacement):
"""Remove `range` and replace it with string `replacement`."""
self.ranges.append((range, replacement))
def remove(self, range):
"""Remove `range`."""
self.replace(range, "")
def insert_before(self, range, text):
"""Insert `text` before `range`."""
self.replace(range.begin(), text)
def insert_after(self, range, text):
"""Insert `text` after `range`."""
self.replace(range.end(), text)
def rewrite(self):
"""Return the rewritten source. May raise :class:`RewriterConflict`."""
self._sort()
self._check()
rewritten, pos = [], 0
for range, replacement in self.ranges:
rewritten.append(self.buffer.source[pos:range.begin_pos])
rewritten.append(replacement)
pos = range.end_pos
rewritten.append(self.buffer.source[pos:])
return Buffer("".join(rewritten), self.buffer.name, self.buffer.first_line)
def _sort(self):
self.ranges.sort(key=lambda x: x[0].begin_pos)
def _check(self):
for (fst, _), (snd, _) in zip(self.ranges, self.ranges[1:]):
if snd.begin_pos < fst.end_pos:
raise RewriterConflict(fst, snd)
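# Illustrative sketch (not part of the original API): a minimal end-to-end use
# of Buffer, Range and Rewriter together. The literal source text and the
# function name are made up for demonstration.
def _example_usage():
    buf = Buffer("x = 1\ny = 2\n", name="<example>")
    rng = Range(buf, 4, 5)                      # covers the "1" on line 1
    assert rng.source() == "1"
    assert rng.line() == 1 and rng.column() == 4
    rewriter = Rewriter(buf)
    rewriter.replace(rng, "42")
    assert rewriter.rewrite().source == "x = 42\ny = 2\n"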
|
|
"""
Author: Benjamin Torben-Nielsen
Date: 18/08/2015
"""
import h5py
import numpy as np
import neuron
from neuron import h
import btstructs
"""
- Convert BBP's H5 morphology format to the more standard SWC format.
- Create a passive model from an SWC file
"""
class h5_point(object) :
def __init__(self,x,y,z,radius) :
self.x = x
self.y = y
self.z = z
self.radius = radius
class h5_structure(object) :
def __init__(self,start_index,n_type,parent_section_index) :
self.start_index = start_index
self.n_type = n_type
self.parent_section_index = parent_section_index
def _get_H5_points(h5_file) :
points = {}
h5_points = h5_file['points']
for index, item in zip(range(len(h5_points)),h5_points) :
points[index] = h5_point(item[0],item[1],item[2],item[3])
return points
def _get_h5_structure(h5_file) :
structures = {}
if 'structure' in h5_file :
h5_structures = h5_file['structure']
for index, item in zip(range(len(h5_structures)),h5_structures) :
structures[index] = h5_structure(item[0], \
item[1],item[2])
return structures
def _create_three_point_soma(structure,points) :
"""
Create a three-point soma assuming that the first entry in the H5 \
structure field is the soma
1 1 xs ys zs rs -1
2 1 xs (ys-rs) zs rs 1
3 1 xs (ys+rs) zs rs 1
    with xs, ys, zs being the point in the middle of the contour and rs being
the average distance between the center and the contour
http://neuromorpho.org/neuroMorpho/SomaFormat.html
"""
xs,ys,zs = [],[],[]
end_of_soma_index = structure[1].start_index-1
for index in range(end_of_soma_index) :
p = points[index]
xs.append(p.x)
ys.append(p.y)
zs.append(p.z)
center_x = np.mean(xs)
center_y = np.mean(ys)
center_z = np.mean(zs)
rs = 0
for x,y,z in zip(xs,ys,zs) :
rs = rs + np.sqrt( (x-center_x)**2 + (y-center_y)**2 + (z-center_z)**2 )
rs = rs / len(xs)
print 'rs=',rs
line = '1 1 '+str(center_x)+' '+str(center_y)+' '+str(center_z) + ' ' + str(rs) + ' -1\n'
line += '2 1 '+str(center_x)+' '+str(center_y-rs)+' '+str(center_z) + ' ' + str(rs) + ' 1\n'
line += '3 1 '+str(center_x)+' '+str(center_y+rs)+' '+str(center_z) + ' ' + str(rs) + ' 1\n'
return line
def _fetch_structure_information(index,structure) :
for seg_id in structure :
struct = structure[seg_id]
if struct.start_index == index :
# print 'index=',index
parent_section_index = struct.parent_section_index
if parent_section_index == 0 :
real_parent_index = 1
else :
real_parent_index = structure[parent_section_index+1].start_index-1
section_type = struct.n_type #structure[parent_section_index].n_type
return real_parent_index,section_type
# print 'returning -10 for index=',index
# raw_input('Press ENTER')
return index - 1, None
def convert_h5_to_SWC(h5_file_name, types=[3,4], swc_given_name=None) :
"""
Convert h5 file to SWC file
Arguments:
- h5_file_name: string with the file name
    - types: list with the neurite types to include, 2: axon, 3: basal, 4: apical
    - swc_given_name: filename of the SWC output file. If not set, the default \
    behaviour simply replaces *.h5 with *.swc
"""
# load points and structure from H5 file
h5_file = h5py.File(h5_file_name,'r')
points = _get_H5_points(h5_file)
structure = _get_h5_structure(h5_file)
structure2 = h5_file['structure']
h5_file.close()
# directly convert into an SWC file
if swc_given_name == None :
swc_file_name = h5_file_name[:-2]+'swc'
else :
swc_file_name = swc_given_name
swc_file = open(swc_file_name,'w')
# main loop
end_of_soma_index = 1000
for index in points :
p = points[index]
if index == 0 :
end_of_soma_index = structure[1].start_index-1
swc_line = _create_three_point_soma(structure,points)
# raw_input('soma, end=%i, press ENTER' % end_of_soma_index)
swc_file.write(swc_line)
elif index <= end_of_soma_index:
#skip the soma
pass
else :
parent_index,point_type = _fetch_structure_information(index,structure)
point_type = point_type if point_type != None else int(swc_line.split(' ')[1])
swc_line = str(index)+' '+str(point_type)+' ' \
+str(p.x)+' '+str(p.y)+' '+str(p.z)+' '+str(p.radius) \
+ ' '+str(parent_index) + '\n'
if point_type in types :
swc_file.write(swc_line)
swc_file.flush()
swc_file.close()
return 0
rs = 0
def create_NRN_from_SWC(file_name, **kwargs) :
    """
    Create a passive multi-compartmental model in pyNRN from an SWC file
    """
    global rs
swc_tree = btstructs.STree()
swc_tree.read_SWC_tree_from_file(file_name)
nodes = swc_tree.get_nodes()
rs = nodes[1].get_content()['p3d'].y
sections = {}
h.load_file("stdlib.hoc") # contains the lambda rule
for node in nodes :
sections.update({node.get_index(): \
            _make_section(node, node.get_index(), sections, **kwargs)})
return sections
def _make_section(node,index,sections,**kwargs) :
compartment = neuron.h.Section(name=str(index)) # NEW NRN SECTION
# assume three point soma
if node.get_index() not in [1,2,3] :
pPos = node.get_parent_node().get_content()['p3d']
cPos = node.get_content()['p3d']
compartment.push()
h.pt3dadd(float(pPos.x),float(pPos.y),float(pPos.z),float(pPos.radius))
h.pt3dadd(float(cPos.x),float(cPos.y),float(cPos.z),float(cPos.radius))
# nseg according to NEURON book
compartment.nseg =int(((compartment.L/(0.1*h.lambda_f(100))+0.9)/2)*2+1)
# passive properties
compartment.cm = kwargs['cm'] if 'cm' in kwargs else 0.9
compartment.Ra = kwargs['ra'] if 'ra' in kwargs else 200
compartment.insert('pas')
compartment.e_pas = kwargs['e_pas'] if 'e_pas' in kwargs else -65
compartment.g_pas = kwargs['g_pas'] if 'g_pas' in kwargs else 1.0/25000
h.pop_section()
compartment.connect(sections.get(node.get_parent_node().get_index()),\
1,0)
return compartment
else :
if node.get_index() == 1 :
# root of SWC tree = soma
cPos = node.get_content()['p3d']
compartment.push()
compartment.diam=rs#cPos.radius
compartment.L=rs#cPos.radius
# passive properties
compartment.cm = kwargs['cm'] if 'cm' in kwargs else 0.9
compartment.Ra = kwargs['ra'] if 'ra' in kwargs else 200
compartment.insert('pas')
compartment.e_pas = kwargs['e_pas'] if 'e_pas' in kwargs else -65
compartment.g_pas = kwargs['g_pas'] if 'g_pas' in kwargs else 1.0/25000
h.pop_section()
#self._soma = compartment
return compartment
#return compartment
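# Illustrative usage sketch (not part of the original module): convert a
# hypothetical BBP H5 morphology to SWC and build a passive NEURON model
# from it. 'cell.h5' and the passive parameters are made-up examples.
#
#     convert_h5_to_SWC('cell.h5', types=[2, 3, 4], swc_given_name='cell.swc')
#     sections = create_NRN_from_SWC('cell.swc', cm=1.0, ra=150, e_pas=-70)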
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow 2.0 layer behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools as it
import sys
import traceback
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DynamicLayer1(base_layer.Layer):
def __init__(self, dynamic=False, **kwargs):
super(DynamicLayer1, self).__init__(dynamic=dynamic, **kwargs)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return math_ops.sqrt(inputs)
else:
return math_ops.square(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class DynamicLayer2(base_layer.Layer):
def __init__(self, dynamic=False, **kwargs):
super(DynamicLayer2, self).__init__(dynamic=dynamic, **kwargs)
def call(self, inputs):
samples = []
for sample in inputs:
samples.append(math_ops.square(sample))
return array_ops.stack(samples, axis=0)
def compute_output_shape(self, input_shape):
return input_shape
class InvalidLayer(base_layer.Layer):
def call(self, inputs):
raise ValueError('You did something wrong!')
class BaseLayerTest(keras_parameterized.TestCase):
@parameterized.parameters(DynamicLayer1, DynamicLayer2)
def test_dynamic_layer_in_functional_model_in_graph_mode(self, layer_class):
with context.graph_mode():
inputs = keras.Input((3,))
# Works when `dynamic=True` is declared.
outputs = layer_class(dynamic=True)(inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(model.dynamic, True)
# But then you cannot run the model since you're in a graph scope.
with self.assertRaisesRegexp(
ValueError, 'You must enable eager execution'):
model.compile(rmsprop.RMSprop(0.001), loss='mse')
# Fails when `dynamic=True` not declared.
with self.assertRaisesRegexp(
TypeError, 'attempting to use Python control flow'):
_ = layer_class()(inputs)
@parameterized.parameters(DynamicLayer1, DynamicLayer2)
def test_dynamic_layer_in_functional_model_in_eager_mode(self, layer_class):
inputs = keras.Input((3,))
# Fails when `dynamic=True` not declared.
with self.assertRaisesRegexp(
TypeError, 'attempting to use Python control flow'):
_ = layer_class()(inputs)
# Works when `dynamic=True` is declared.
outputs = layer_class(dynamic=True)(inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_nested_dynamic_layers_in_eager_mode(self):
inputs = keras.Input((3,))
outputs = DynamicLayer1(dynamic=True)(inputs)
inner_model = keras.Model(inputs, outputs)
self.assertEqual(inner_model.dynamic, True)
inputs = keras.Input((3,))
x = DynamicLayer2(dynamic=True)(inputs)
outputs = inner_model(x)
model = keras.Model(inputs, outputs)
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_dynamic_layers_in_sequential_model(self):
# Without input_shape argument
model = keras.Sequential([DynamicLayer1(dynamic=True),
keras.layers.Dense(3),
DynamicLayer2(dynamic=True)])
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
# With input_shape argument
model = keras.Sequential([DynamicLayer1(dynamic=True, input_shape=(3,)),
DynamicLayer2(dynamic=True)])
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_dynamic_layers_in_subclassed_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.layer1 = DynamicLayer1(dynamic=True)
def call(self, inputs):
return self.layer1(inputs)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_dynamic_subclassed_model_no_shape_inference(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(dynamic=True)
self.layer1 = keras.layers.Dense(3)
self.layer2 = keras.layers.Dense(3)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
self.assertEqual(model.outputs, [None])
def test_dynamic_subclassed_model_with_shape_inference(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(dynamic=True)
self.layer1 = keras.layers.Dense(3)
self.layer2 = keras.layers.Dense(3)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
def compute_output_shape(self, input_shape):
return tensor_shape.TensorShape(
tuple(input_shape[:-1].as_list()) + (3,))
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
self.assertEqual(model.outputs[0].shape.as_list(), [None, 3])
@test_util.run_in_graph_and_eager_modes
def test_invalid_forward_pass(self):
inputs = keras.Input((3,))
with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):
_ = InvalidLayer()(inputs)
@keras_parameterized.run_with_all_model_types
@test_util.run_in_graph_and_eager_modes
def test_build_with_numpy_data(self):
model_layers = [
keras.layers.Dense(3, activation='relu', kernel_initializer='ones'),
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model(np.zeros((2, 4), dtype='float32'))
self.assertTrue(model.built)
@test_util.run_in_graph_and_eager_modes
def test_default_add_weight(self):
class TestLayer(keras.layers.Layer):
def __init__(self):
super(TestLayer, self).__init__()
self.default_weight = self.add_weight()
self.weight_without_name = self.add_weight(shape=(3, 4))
self.regularized_weight_without_name = self.add_weight(
shape=(3, 4), regularizer='l2')
layer = TestLayer()
self.assertEqual(layer.default_weight.shape.as_list(), [])
self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])
self.assertEqual(layer.default_weight.dtype.name, 'float32')
self.assertEqual(layer.weight_without_name.dtype.name, 'float32')
self.assertEqual(len(layer.losses), 1)
if not context.executing_eagerly():
# Cannot access tensor.name in eager execution.
self.assertTrue('Variable_2/Regularizer' in layer.losses[0].name)
def test_learning_phase_freezing_for_layers(self):
# This test is only meant to run in graph functions mode (ambient eager).
# In forced eager, `model.predict` ignores the global learning phase
# and just uses training=False. TODO(fchollet): consider unifying the
# behaviors.
class LearningPhaseLayer(keras.layers.Layer):
def call(self, inputs):
return keras.backend.in_train_phase(
lambda: array_ops.ones_like(inputs),
lambda: array_ops.zeros_like(inputs))
def get_learning_phase_value():
model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
return np.sum(model.predict(np.ones((1, 1))))
self.assertEqual(get_learning_phase_value(), 0)
# Test scope.
with keras.backend.learning_phase_scope(1):
self.assertEqual(get_learning_phase_value(), 1)
# The effects of the scope end after exiting it.
self.assertEqual(get_learning_phase_value(), 0)
# Test setting.
keras.backend.set_learning_phase(1)
self.assertEqual(get_learning_phase_value(), 1)
keras.backend.set_learning_phase(0)
self.assertEqual(get_learning_phase_value(), 0)
# Cannot be enabled with `run_eagerly=True`, see b/123904578
  @test_util.run_in_graph_and_eager_modes
def test_layer_can_return_variable(self):
class ComputeSum(keras.layers.Layer):
def __init__(self):
super(ComputeSum, self).__init__()
self.total = variables.Variable(
initial_value=array_ops.zeros((1, 1)), trainable=False)
if not context.executing_eagerly():
keras.backend.get_session().run(self.total.initializer)
def call(self, inputs):
self.total.assign_add(inputs)
return self.total
inputs = keras.Input(shape=(1,))
model = keras.Model(inputs, ComputeSum()(inputs))
model.predict(np.ones((1, 1)))
def _get_layer_with_training_arg(self):
class TrainingLayer(keras.layers.Layer):
"""A layer with a `training` argument in a defuned `call`."""
@def_function.function
def call(self, inputs, training=None):
if training is None:
training = keras.backend.learning_phase()
return tf_utils.smart_cond(training,
lambda: array_ops.ones_like(inputs),
lambda: array_ops.zeros_like(inputs))
return TrainingLayer()
@keras_parameterized.run_with_all_model_types
# b/124459427: can't test with `run_eagerly=True` for now.
@test_util.run_in_graph_and_eager_modes
def test_training_arg_in_defun(self):
layer = self._get_layer_with_training_arg()
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model.compile(rmsprop.RMSprop(0.),
loss='mae')
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history['loss'][0], 1.)
loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(loss, 0.)
# Test that the argument injection performed in `call` is not active
# when the argument is passed explicitly.
layer = self._get_layer_with_training_arg()
inputs = keras.Input(shape=(1,))
# Pass `training` by name
outputs = layer(inputs, training=False)
model = keras.Model(inputs, outputs)
model.compile(rmsprop.RMSprop(0.),
loss='mae')
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history['loss'][0], 0.)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_raw_variable_assignment(self):
class RawVariableLayer(keras.layers.Layer):
def __init__(self, **kwargs):
super(RawVariableLayer, self).__init__(**kwargs)
# Test variables in nested structure.
self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]
def call(self, inputs):
return inputs * self.var_list[0] * self.var_list[1]['a']
model = testing_utils.get_model_from_layers([RawVariableLayer()],
input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
# Checks that variables get initialized.
model.fit(x, y, batch_size=2, epochs=2)
class SymbolicSupportTest(test.TestCase):
def test_using_symbolic_tensors_with_tf_ops(self):
# Single-input.
x = keras.Input((3,))
y = math_ops.square(x)
self.assertEqual(y.graph, keras.backend.get_graph())
# Multi-inputs.
x1, x2 = keras.Input((3,)), keras.Input((3,))
y = array_ops.concat([x1, x2], axis=1)
self.assertEqual(y.graph, keras.backend.get_graph())
# Mixing Keras symbolic tensors and graph tensors from the same graph works.
with keras.backend.get_graph().as_default():
x1 = keras.Input((3,))
x2 = keras.Input((3,))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
# Creating same op type (matmul) multiple times in the Keras graph works.
x1 = keras.Input((3,))
x2 = keras.Input((3,))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
def test_mixing_eager_and_graph_tensors(self):
with ops.Graph().as_default():
x1 = array_ops.ones((3, 3))
x2 = array_ops.ones((3, 3))
self.assertIsInstance(x2, ops.EagerTensor)
with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
math_ops.matmul(x1, x2)
def test_mixing_numpy_arrays_and_graph_tensors(self):
with ops.Graph().as_default():
x1 = array_ops.ones((3, 3))
x2 = np.ones((3, 3), dtype='float32')
with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
math_ops.matmul(x1, x2)
@test_util.run_in_graph_and_eager_modes
def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):
x1 = keras.Input((3,))
x2 = array_ops.ones((3, 3))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
fn = keras.backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0],
np.matmul(x_val, y_val),
atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):
x1 = keras.Input((3,))
x2 = np.ones((3, 3), dtype='float32')
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
fn = keras.backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0],
np.matmul(x_val, y_val),
atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_reraising_exception(self):
# When layer is not dynamic, we have some pattern matching during exception
# handling to detect when the user is trying to use python control flow.
# When an exception is thrown but the pattern doesn't match, we want to
# preserve the originating stack trace. An early implementation of this
# logic lost the stack trace. We test the correct behavior here.
class TypeErrorLayer(base_layer.Layer):
def call(self, inputs):
def easily_identifiable_name():
raise TypeError('Non-matching TypeError message.')
easily_identifiable_name()
inputs = keras.Input((3,))
try:
_ = TypeErrorLayer()(inputs)
except TypeError:
tb = traceback.extract_tb(sys.exc_info()[2])
last_entry = tb[-1]
function_name = last_entry[2]
self.assertEqual(function_name, 'easily_identifiable_name')
@test_util.run_all_in_graph_and_eager_modes
class NestedTrackingTest(test.TestCase):
def test_nested_layer_variable_tracking(self):
# Test that variables from nested sublayers are
# being tracked by subclassed layers.
class MyLayer(keras.layers.Layer):
def __init__(self):
super(MyLayer, self).__init__()
self.dense1 = keras.layers.Dense(1)
self.dense2 = keras.layers.BatchNormalization()
def build(self, input_shape):
self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())
self.v2 = variables.Variable(
name='v2',
initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),
trainable=False)
def call(self, inputs):
x = self.dense1(inputs) + self.dense2(inputs)
return x + self.v1 + self.v2
layer = MyLayer()
inputs = keras.Input((1,))
_ = layer(inputs)
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 5)
self.assertEqual(len(layer.non_trainable_weights), 3)
layer.dense1.trainable = False
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 5)
layer.trainable = False
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.non_trainable_weights), 8)
self.assertEqual(
set([layer.dense1, layer.dense2, layer.v1, layer.v2]),
set([obj for unused_name, obj in layer._checkpoint_dependencies]))
def test_nested_layer_updates_losses_tracking(self):
# Test that updates and losses from nested sublayers are
# being tracked by subclassed layers.
class UpdateAndLossLayer(keras.layers.Layer):
def build(self, _):
self.v1 = self.add_weight('v1', shape=())
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
self.add_update(state_ops.assign_add(self.v1, 1))
return inputs + 1
class MyLayer(keras.layers.Layer):
def build(self, _):
self.v1 = self.add_weight('v1', shape=())
def __init__(self):
super(MyLayer, self).__init__()
self.ul1 = UpdateAndLossLayer()
self.ul2 = UpdateAndLossLayer()
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
self.add_update(state_ops.assign_add(self.v1, 1))
x = self.ul1(inputs)
return self.ul2(x)
layer = MyLayer()
if context.executing_eagerly():
inputs = array_ops.ones((3, 1))
_ = layer(inputs)
self.assertEqual(len(layer.losses), 3)
else:
inputs = keras.Input((1,))
_ = layer(inputs)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.updates), 3)
def test_attribute_reassignment(self):
l = keras.layers.Layer()
l.a = keras.layers.Layer()
l.a = []
l.a = variables.Variable(1.)
l.a = keras.layers.Layer()
last_assignment = keras.layers.Layer()
l.a = last_assignment
l.b = variables.Variable(1.)
del l.b
l.c = keras.layers.Layer()
del l.c
l.d = last_assignment
del l.d
self.assertEqual([last_assignment], l._layers)
self.assertEqual([], l.trainable_weights)
self.assertEqual([], l.non_trainable_weights)
self.assertEqual([], l.weights)
del l.a
self.assertEqual([], l._layers)
@test_util.run_all_in_graph_and_eager_modes
class NameScopingTest(keras_parameterized.TestCase):
def test_name_scope_layer(self):
x = keras.backend.placeholder(shape=(10, 10))
layer = keras.layers.Dense(10, name='MyName')
layer(x)
self.assertEqual(layer.bias.name, 'MyName/bias:0')
self.assertEqual(layer.kernel.name, 'MyName/kernel:0')
def test_name_scope_sublayer(self):
x = keras.backend.placeholder(shape=(10, 10))
layer = keras.layers.Dense(
10, activation=keras.layers.ReLU(name='MyAct'), name='MyName2')
y = layer(x)
self.assertEqual(layer.bias.name, 'MyName2/bias:0')
self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')
self.assertEqual(y.name, 'MyName2/MyAct/Relu:0')
def test_name_scope_tf_tensor(self):
x = ops.convert_to_tensor(np.ones((10, 10)))
layer = keras.layers.Dense(
10, activation=keras.layers.ReLU(name='MyAct'), name='MyName3')
layer(x)
self.assertEqual(layer.bias.name, 'MyName3/bias:0')
self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')
_LAYERS_TO_TEST = [
(keras.layers.Dense, (1,), collections.OrderedDict(units=[1])),
(keras.layers.Activation, (2, 2),
collections.OrderedDict(activation=['relu'])),
(keras.layers.Dropout, (16,), collections.OrderedDict(rate=[0.25])),
(keras.layers.BatchNormalization, (8, 8, 3), collections.OrderedDict(
axis=[3], center=[True, False], scale=[True, False])),
(keras.layers.Conv1D, (8, 8), collections.OrderedDict(
filters=[1], kernel_size=[1, 3], strides=[1, 2],
padding=['valid', 'same'], use_bias=[True, False],
kernel_regularizer=[None, 'l2'])),
(keras.layers.Conv2D, (8, 8, 3), collections.OrderedDict(
filters=[1], kernel_size=[1, 3], strides=[1, 2],
padding=['valid', 'same'], use_bias=[True, False],
kernel_regularizer=[None, 'l2'])),
(keras.layers.LSTM, (8, 8), collections.OrderedDict(
units=[1],
activation=[None, 'relu'],
kernel_regularizer=[None, 'l2'],
dropout=[0, 0.5],
stateful=[True, False],
unroll=[True, False])),
]
OUTPUT_TEST_CASES = []
for layer_type, inp_shape, arg_dict in _LAYERS_TO_TEST:
arg_combinations = [[(k, i) for i in v] for k, v in arg_dict.items()] # pylint: disable=g-complex-comprehension
for args in it.product(*arg_combinations):
name = '_{}_{}'.format(
layer_type.__name__, '_'.join('{}_{}'.format(k, v) for k, v in args))
OUTPUT_TEST_CASES.append(
(name, layer_type, inp_shape, {k: v for k, v in args}))
class OutputTypeTest(keras_parameterized.TestCase):
"""Test that layers and models produce the correct tensor types."""
# In v1 graph there are only symbolic tensors.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(*OUTPUT_TEST_CASES)
def test_layer_outputs(self, layer_to_test, input_shape, layer_kwargs):
layer = layer_to_test(**layer_kwargs)
input_data = np.ones(shape=(2,) + input_shape, dtype=np.float32)
layer_result = layer(input_data)
inp = keras.layers.Input(shape=input_shape, batch_size=2)
model = keras.models.Model(inp, layer_to_test(**layer_kwargs)(inp))
model_result = model(input_data)
for x in [layer_result, model_result]:
if not isinstance(x, ops.Tensor):
raise ValueError('Tensor or EagerTensor expected, got type {}'
.format(type(x)))
if isinstance(x, ops.EagerTensor) != context.executing_eagerly():
expected_type = (ops.EagerTensor if context.executing_eagerly()
else ops.Tensor)
raise ValueError('Expected type {}, got type {}'
.format(expected_type, type(x)))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
|
from __future__ import division
import numpy as np
from neupy import init
from neupy.utils import format_data
from neupy.exceptions import NotTrained
from neupy.algorithms.base import BaseNetwork
from neupy.core.properties import (
IntProperty, Property,
TypedListProperty, NumberProperty,
)
__all__ = ('LVQ', 'LVQ2', 'LVQ21', 'LVQ3')
def euclid_distance(X, weight):
X = np.expand_dims(X, axis=0)
euclid_dist = np.linalg.norm(X - weight, axis=1)
return np.expand_dims(euclid_dist, axis=0)
def n_argmin(array, n, axis=0):
    sorted_arguments = array.argsort(axis=axis).ravel()
    return sorted_arguments[:n]
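# Hedged illustration (not part of the original module): `euclid_distance`
# returns distances as a row vector of shape (1, n_subclasses), so
# `n_argmin(..., n=2, axis=1)` yields the indices of the two closest
# prototypes, which is what the LVQ2/LVQ2.1/LVQ3 updates rely on.
#
# >>> weight = np.array([[0., 1.], [3., 4.], [1., 0.]])
# >>> distances = euclid_distance(np.array([0., 0.]), weight)
# >>> distances.shape
# (1, 3)
# >>> n_argmin(distances, n=2, axis=1)
# array([0, 2])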
class LVQ(BaseNetwork):
"""
Learning Vector Quantization (LVQ) algorithm.
Notes
-----
- Input data needs to be normalized, because LVQ uses
Euclidean distance to find clusters.
    - Training error is just the ratio of misclassified samples.
Parameters
----------
n_inputs : int
Number of input units. It should be equal to the
number of features in the input data set.
n_subclasses : int, None
        Defines the total number of subclasses. The value should be greater
        than or equal to the number of classes. ``None`` sets the number of
        subclasses equal to the number of classes. Defaults to ``None``.
n_classes : int
Number of classes in the data set.
prototypes_per_class : list, None
        Defines the number of prototypes per class. For instance,
        if ``n_classes=3`` and ``n_subclasses=8`` then there can be
        3 subclasses for the first class, 3 for the second one
        and 2 for the third one (3 + 3 + 2 == 8). This example
        can be specified as ``prototypes_per_class=[3, 3, 2]``.
There are two rules that apply to this parameter:
1. ``sum(prototypes_per_class) == n_subclasses``
2. ``len(prototypes_per_class) == n_classes``
        The ``None`` value distributes an approximately equal number
        of subclasses per class. It is only approximate because, when
        ``n_subclasses % n_classes != 0``, there is no way to assign
        exactly the same number of subclasses to every class.
Defaults to ``None``.
{BaseNetwork.step}
n_updates_to_stepdrop : int or None
        If this option is not equal to ``None`` then after every
        update LVQ reduces the step size until the number of applied
        updates reaches the ``n_updates_to_stepdrop`` value. The
        minimum possible step size is defined by the ``minstep``
        parameter.
        Be aware that the number of updates is not the same as the
        number of epochs. LVQ applies an update after each sample
        propagated through the network. The relation between this
        parameter and the maximum number of epochs is the following
.. code-block:: python
n_updates_to_stepdrop = n_samples * n_max_epochs
        If the parameter is equal to ``None`` then the step size is
        not reduced after each update.
Defaults to ``None``.
minstep : float
        The step size will never be lower than this value. This
        property is useful only when ``n_updates_to_stepdrop``
        is not ``None``. Defaults to ``1e-5``.
{BaseNetwork.show_epoch}
{BaseNetwork.shuffle_data}
{BaseNetwork.signals}
{Verbose.verbose}
Methods
-------
{BaseSkeleton.predict}
{BaseSkeleton.fit}
Examples
--------
>>> import numpy as np
>>> from neupy import algorithms
>>>
>>> X = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [2, 2], [1, 2]])
>>> y = np.array([0, 0, 0, 1, 1, 1])
>>>
>>> lvqnet = algorithms.LVQ(n_inputs=2, n_classes=2)
>>> lvqnet.train(X, y, epochs=100)
>>> lvqnet.predict([[2, 1], [-1, -1]])
array([1, 0])
"""
n_inputs = IntProperty(minval=1)
n_subclasses = IntProperty(minval=2, default=None, allow_none=True)
n_classes = IntProperty(minval=2)
prototypes_per_class = TypedListProperty(allow_none=True, default=None)
weight = Property(
expected_type=(np.ndarray, init.Initializer),
allow_none=True,
default=None,
)
n_updates_to_stepdrop = IntProperty(
default=None,
allow_none=True,
minval=1,
)
minstep = NumberProperty(minval=0, default=1e-5)
def __init__(self, **options):
self.initialized = False
super(LVQ, self).__init__(**options)
self.n_updates = 0
if self.n_subclasses is None:
self.n_subclasses = self.n_classes
if isinstance(self.weight, init.Initializer):
weight_shape = (self.n_inputs, self.n_subclasses)
self.weight = self.weight.sample(weight_shape, return_array=True)
if self.weight is not None:
self.initialized = True
if self.n_subclasses < self.n_classes:
raise ValueError("Number of subclasses should be greater "
"or equal to the number of classes. Network "
"was defined with {} subclasses and {} classes"
"".format(self.n_subclasses, self.n_classes))
if self.prototypes_per_class is None:
            whole, remainder = divmod(self.n_subclasses, self.n_classes)
            self.prototypes_per_class = [whole] * self.n_classes
            if remainder:
                # Since there is a remainder left, we cannot assign an
                # equal number of subclasses to every class, therefore
                # we add +1 to a few randomly selected classes.
                class_indices = np.random.choice(self.n_classes, remainder,
                                                 replace=False)
for class_index in class_indices:
self.prototypes_per_class[class_index] += 1
if len(self.prototypes_per_class) != self.n_classes:
raise ValueError("LVQ defined for classification problem that has "
"{} classes, but the `prototypes_per_class` "
"variable has defined data for {} classes."
"".format(self.n_classes,
len(self.prototypes_per_class)))
if sum(self.prototypes_per_class) != self.n_subclasses:
raise ValueError("Invalid distribution of subclasses for the "
"`prototypes_per_class` variable. Got total "
"of {} subclasses ({}) instead of {} expected"
"".format(sum(self.prototypes_per_class),
self.prototypes_per_class,
self.n_subclasses))
self.subclass_to_class = []
for class_id, n_prototypes in enumerate(self.prototypes_per_class):
self.subclass_to_class.extend([class_id] * n_prototypes)
@property
def training_step(self):
if self.n_updates_to_stepdrop is None:
return self.step
updates_ratio = (1 - self.n_updates / self.n_updates_to_stepdrop)
return self.minstep + (self.step - self.minstep) * updates_ratio
def predict(self, X):
if not self.initialized:
raise NotTrained("LVQ network hasn't been trained yet")
X = format_data(X)
subclass_to_class = self.subclass_to_class
weight = self.weight
predictions = []
for input_row in X:
output = euclid_distance(input_row, weight)
winner_subclass = int(output.argmin(axis=1))
predicted_class = subclass_to_class[winner_subclass]
predictions.append(predicted_class)
return np.array(predictions)
def train(self, X_train, y_train, *args, **kwargs):
X_train = format_data(X_train)
y_train = format_data(y_train)
n_input_samples = len(X_train)
if n_input_samples <= self.n_subclasses:
raise ValueError("Number of training input samples should be "
"greater than number of subclasses. Training "
"method received {} input samples."
"".format(n_input_samples))
if not self.initialized:
target_classes = sorted(np.unique(y_train).astype(np.int))
expected_classes = list(range(self.n_classes))
if target_classes != expected_classes:
raise ValueError(
"All classes should be integers from the range [0, {}], "
"but got the following classes instead {}"
"".format(self.n_classes - 1, target_classes))
weights = []
iterator = zip(target_classes, self.prototypes_per_class)
for target_class, n_prototypes in iterator:
is_valid_class = (y_train[:, 0] == target_class)
is_valid_class = is_valid_class.astype('float64')
n_samples_per_class = sum(is_valid_class)
is_valid_class /= n_samples_per_class
if n_samples_per_class <= n_prototypes:
raise ValueError(
"Input data has {0} samples for class-{1}. Number "
"of samples per specified class-{1} should be "
"greater than {2}.".format(
n_samples_per_class, target_class, n_prototypes))
class_weight_indices = np.random.choice(
np.arange(n_input_samples), n_prototypes,
replace=False, p=is_valid_class)
class_weight = X_train[class_weight_indices]
weights.extend(class_weight)
self.weight = np.array(weights)
self.initialized = True
super(LVQ, self).train(X_train, y_train, *args, **kwargs)
def one_training_update(self, X_train, y_train):
weight = self.weight
subclass_to_class = self.subclass_to_class
n_correct_predictions = 0
for input_row, target in zip(X_train, y_train):
step = self.training_step
output = euclid_distance(input_row, weight)
winner_subclass = int(output.argmin())
predicted_class = subclass_to_class[winner_subclass]
weight_update = input_row - weight[winner_subclass, :]
is_correct_prediction = (predicted_class == target).item(0)
if is_correct_prediction:
weight[winner_subclass, :] += step * weight_update
else:
weight[winner_subclass, :] -= step * weight_update
n_correct_predictions += is_correct_prediction
self.n_updates += 1
n_samples = len(X_train)
return 1 - n_correct_predictions / n_samples
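# Hedged sketch (not part of the original module) of the linear step decay
# implemented by `LVQ.training_step`: with `n_updates_to_stepdrop` set, the
# step shrinks linearly from `step` towards `minstep` as updates accumulate.
#
# >>> lvqnet = LVQ(n_inputs=2, n_classes=2, step=0.1,
# ...              n_updates_to_stepdrop=100, minstep=1e-5)
# >>> lvqnet.n_updates = 50           # halfway through the schedule
# >>> round(lvqnet.training_step, 6)  # minstep + (step - minstep) * 0.5
# 0.050005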
class LVQ2(LVQ):
"""
Learning Vector Quantization 2 (LVQ2) algorithm.
    Improved version of the LVQ algorithm.
Parameters
----------
epsilon : float
        Ratio between the two closest subclasses that
        triggers a double weight update. Defaults to ``0.1``.
{LVQ.Parameters}
Notes
-----
{LVQ.Notes}
Examples
--------
>>> import numpy as np
>>> from neupy import algorithms
>>>
>>> X = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [2, 2], [1, 2]])
>>> y = np.array([0, 0, 0, 1, 1, 1])
>>>
>>> lvqnet = algorithms.LVQ2(n_inputs=2, n_classes=2)
>>> lvqnet.train(X, y, epochs=100)
>>> lvqnet.predict([[2, 1], [-1, -1]])
array([1, 0])
"""
epsilon = NumberProperty(default=0.1)
def one_training_update(self, X_train, y_train):
weight = self.weight
epsilon = self.epsilon
subclass_to_class = self.subclass_to_class
n_correct_predictions = 0
for input_row, target in zip(X_train, y_train):
step = self.training_step
output = euclid_distance(input_row, weight)
winner_subclasses = n_argmin(output, n=2, axis=1)
top1_subclass, top2_subclass = winner_subclasses
top1_class = subclass_to_class[top1_subclass]
top2_class = subclass_to_class[top2_subclass]
top1_weight_update = input_row - weight[top1_subclass, :]
is_correct_prediction = (top1_class == target).item(0)
closest_dist, runner_up_dist = output[0, winner_subclasses]
double_update_condition_satisfied = (
not is_correct_prediction and
(top2_class == target) and
closest_dist > ((1 - epsilon) * runner_up_dist) and
runner_up_dist < ((1 + epsilon) * closest_dist)
)
if double_update_condition_satisfied:
                top2_weight_update = input_row - weight[top2_subclass, :]
weight[top1_subclass, :] -= step * top1_weight_update
weight[top2_subclass, :] += step * top2_weight_update
elif is_correct_prediction:
weight[top1_subclass, :] += step * top1_weight_update
else:
weight[top1_subclass, :] -= step * top1_weight_update
            n_correct_predictions += is_correct_prediction
            self.n_updates += 1
        n_samples = len(X_train)
return 1 - n_correct_predictions / n_samples
class LVQ21(LVQ2):
"""
Learning Vector Quantization 2.1 (LVQ2.1) algorithm.
    Improved version of the LVQ2 algorithm.
Parameters
----------
{LVQ2.Parameters}
Notes
-----
{LVQ2.Notes}
Examples
--------
>>> import numpy as np
>>> from neupy import algorithms
>>>
>>> X = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [2, 2], [1, 2]])
>>> y = np.array([0, 0, 0, 1, 1, 1])
>>>
>>> lvqnet = algorithms.LVQ21(n_inputs=2, n_classes=2)
>>> lvqnet.train(X, y, epochs=100)
>>> lvqnet.predict([[2, 1], [-1, -1]])
array([1, 0])
"""
def one_training_update(self, X_train, y_train):
weight = self.weight
epsilon = self.epsilon
subclass_to_class = self.subclass_to_class
n_correct_predictions = 0
for input_row, target in zip(X_train, y_train):
step = self.training_step
output = euclid_distance(input_row, weight)
winner_subclasses = n_argmin(output, n=2, axis=1)
top1_subclass, top2_subclass = winner_subclasses
top1_class = subclass_to_class[top1_subclass]
top2_class = subclass_to_class[top2_subclass]
top1_weight_update = input_row - weight[top1_subclass, :]
is_correct_prediction = (top1_class == target).item(0)
closest_dist, runner_up_dist = output[0, winner_subclasses]
double_update_condition_satisfied = (
(
(top1_class == target and top2_class != target) or
(top1_class != target and top2_class == target)
) and
closest_dist > ((1 - epsilon) * runner_up_dist) and
runner_up_dist < ((1 + epsilon) * closest_dist)
)
if double_update_condition_satisfied:
                top2_weight_update = input_row - weight[top2_subclass, :]
if is_correct_prediction:
weight[top2_subclass, :] -= step * top2_weight_update
weight[top1_subclass, :] += step * top1_weight_update
else:
weight[top1_subclass, :] -= step * top1_weight_update
weight[top2_subclass, :] += step * top2_weight_update
elif is_correct_prediction:
weight[top1_subclass, :] += step * top1_weight_update
else:
weight[top1_subclass, :] -= step * top1_weight_update
n_correct_predictions += is_correct_prediction
self.n_updates += 1
n_samples = len(X_train)
return 1 - n_correct_predictions / n_samples
class LVQ3(LVQ21):
"""
Learning Vector Quantization 3 (LVQ3) algorithm.
    Improved version of the LVQ2.1 algorithm.
Parameters
----------
{LVQ.n_inputs}
{LVQ.n_subclasses}
{LVQ.n_classes}
{LVQ.prototypes_per_class}
{LVQ2.epsilon}
slowdown_rate : float
        Parameter that scales the learning step in order to decrease
        it when the two closest subclasses predict the target
        value correctly. Defaults to ``0.4``.
step : float
Learning rate, defaults to ``0.01``.
{BaseNetwork.show_epoch}
{BaseNetwork.shuffle_data}
{BaseNetwork.signals}
{Verbose.verbose}
Notes
-----
{LVQ21.Notes}
- Decreasing step and increasing number of training epochs
can improve the performance.
Examples
--------
>>> import numpy as np
>>> from neupy import algorithms
>>>
>>> X = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [2, 2], [1, 2]])
>>> y = np.array([0, 0, 0, 1, 1, 1])
>>>
>>> lvqnet = algorithms.LVQ3(n_inputs=2, n_classes=2)
>>> lvqnet.train(X, y, epochs=100)
>>> lvqnet.predict([[2, 1], [-1, -1]])
array([1, 0])
"""
step = NumberProperty(minval=0, default=0.01)
slowdown_rate = NumberProperty(minval=0, default=0.4)
def one_training_update(self, X_train, y_train):
weight = self.weight
epsilon = self.epsilon
slowdown_rate = self.slowdown_rate
subclass_to_class = self.subclass_to_class
n_correct_predictions = 0
for input_row, target in zip(X_train, y_train):
step = self.training_step
output = euclid_distance(input_row, weight)
winner_subclasses = n_argmin(output, n=2, axis=1)
top1_subclass, top2_subclass = winner_subclasses
top1_class = subclass_to_class[top1_subclass]
top2_class = subclass_to_class[top2_subclass]
top1_weight_update = input_row - weight[top1_subclass, :]
is_first_correct = (top1_class == target).item(0)
is_second_correct = (top2_class == target).item()
closest_dist, runner_up_dist = output[0, winner_subclasses]
double_update_condition_satisfied = (
(
(is_first_correct and not is_second_correct) or
(is_second_correct and not is_first_correct)
) and
closest_dist > ((1 - epsilon) * runner_up_dist) and
runner_up_dist < ((1 + epsilon) * closest_dist)
)
two_closest_correct_condition_satisfied = (
is_first_correct and is_second_correct and
closest_dist > ((1 - epsilon) * (1 + epsilon) * runner_up_dist)
)
if double_update_condition_satisfied:
                top2_weight_update = input_row - weight[top2_subclass, :]
if is_first_correct:
weight[top1_subclass, :] += step * top1_weight_update
weight[top2_subclass, :] -= step * top2_weight_update
else:
weight[top1_subclass, :] -= step * top1_weight_update
weight[top2_subclass, :] += step * top2_weight_update
elif two_closest_correct_condition_satisfied:
beta = step * slowdown_rate
top2_weight_update = input_row - weight[top2_class, :]
weight[top1_subclass, :] += beta * top1_weight_update
weight[top2_subclass, :] += beta * top2_weight_update
else:
weight[top1_subclass, :] -= step * top1_weight_update
n_correct_predictions += is_first_correct
self.n_updates += 1
n_samples = len(X_train)
return 1 - n_correct_predictions / n_samples
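# Hedged helper sketch (not part of the original module): the "epsilon window"
# shared by LVQ2, LVQ2.1 and LVQ3 only allows a double weight update when the
# two closest prototypes lie at comparable distances from the sample.
def _inside_epsilon_window(closest_dist, runner_up_dist, epsilon):
    return (closest_dist > (1 - epsilon) * runner_up_dist and
            runner_up_dist < (1 + epsilon) * closest_dist)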
|
|
# -*- python -*-
#
# This file is part of the CNO package
#
# Copyright (c) 2012-2014 - EMBL-EBI
#
# File author(s): Thomas Cokelaer (cokelaer@ebi.ac.uk)
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: http://github.com/cellnopt/cellnopt
#
##############################################################################
"""This module contains a base class to manipulate reactions
.. testsetup:: reactions
from cno import Reaction
from cno.io.reactions import Reactions
r = Reactions()
"""
from __future__ import print_function
import re
from cno.misc import CNOError
__all__ = ["Reaction", "Reactions"]
class ReactionBase(object):
valid_symbols = ["+", "!", "&", "^"]
and_symbol = "^"
class Reaction(str, ReactionBase):
"""Logical Reaction
A Reaction can encode logical ANDs and ORs as well as NOT::
>>> from cno import Reaction
>>> r = Reaction("A+B=C") # a OR reaction
>>> r = Reaction("A^B=C") # an AND reaction
>>> r = Reaction("A&B=C") # an AND reaction
>>> r = Reaction("C=D") # an activation
>>> r = Reaction("!D=E") # a NOT reaction
The syntax is as follows:
#. The **!** sign indicates a logical NOT.
#. The **+** sign indicates a logical OR.
#. The **=** sign indicates a relation (edge).
#. The **^** or **&** signs indicate a logical AND. Note that **&** signs
will be replaced by **^**.
Internally, reactions are checked for validity (e.g., !=C is invalid).
You can reset the name::
>>> r.name = "A+B+C=D"
or create an instance from another instance::
>>> newr = Reaction(r)
Sorting can be done inplace (default) or not. ::
>>> r = Reaction("F+D^!B+!A=Z")
>>> r.sort(inplace=False)
'!A+!B^D+F=Z'
    Simple operators (e.g., equality) are available. Note that equality sorts the species
    internally, so A+B=C is equal to B+A=C and there is no need to call :meth:`sort`::
>>> r = Reaction("F+D^!B+!A=Z")
>>> r == '!A+!B^D+F=Z'
True
If a reaction **A+A=B** is provided, it can be simplified by calling :meth:`simplify`.
    AND operators are not simplified. More sophisticated simplifications based on truth
    tables could be used but are not implemented in this class for now.
"""
# use __new__ to inherit from str class.
def __new__(cls, reaction=None, strict_rules=True):
"""
:param str reaction: a valid reaction (e.g., A=B, A+B=C, !B=D, C^D=F, ...),
or an instance of :class:`Reaction`.
:param bool strict_rules: if True, reactions cannot start with =, ^ or +
signs (default to True).
"""
# Since __init__ is called after the object is constructed,
# it is too late to modify the value for immutable types.
        # Note that __new__ is a static method, so it receives the class.
self = str.__new__(cls, reaction)
self._strict_rules = strict_rules
# since strings are immutable, we use this attribute to play around
# with the name
self._name = None
if reaction is not None:
# could be a Reaction instance
if hasattr(reaction, "name"):
self.name = reaction.name[:]
# or a string
# no unicode to be python3 compatible.
elif isinstance(reaction, (str)):
self.name = reaction[:]
else:
raise CNOError("neither a string nor a Reaction instance")
return self
def _set_name(self, reaction):
if reaction is not None:
reaction = self._valid_reaction(reaction)
self._name = reaction[:]
def _get_name(self):
return self._name
name = property(_get_name, _set_name,
doc="Getter/Setter for the reaction name")
def _get_species(self, reac=None):
"""
.. doctest:: reactions
>>> r = Reaction("!a+c^d^e^f+!h=b")
>>> r.species
['a', 'c', 'd', 'e', 'f', 'h', 'b']
"""
if reac is None:
reac = self.name[:]
species = re.split("[+|=|^|!]", reac)
species = [x for x in species if x]
return species
species = property(_get_species)
def get_signed_lhs_species(self):
lhs = self.lhs[:]
species = re.split("[+|^]", lhs)
pos = [x for x in species if x.startswith("!") is False]
neg = [x[1:] for x in species if x.startswith("!") is True]
return {'-': neg, '+': pos}
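    # Hedged illustration (not part of the original class): for
    # Reaction("F+D^!B+!A=Z"), get_signed_lhs_species() returns
    # {'+': ['F', 'D'], '-': ['B', 'A']}, i.e. activators under '+' and
    # inhibitors (stripped of their ! sign) under '-'.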
def _get_lhs(self):
return self.name.split("=")[0]
lhs = property(_get_lhs,
doc="Getter for the left hand side of the = character")
def _get_lhs_species(self):
lhs = self.name.split("=")[0]
species = self._get_species(reac=lhs)
return species
lhs_species = property(_get_lhs_species,
doc="Getter for the list of species on the left hand side of the = character")
def _get_rhs(self):
return self.name.split("=")[1]
rhs = property(_get_rhs,
doc="Getter for the right hand side of the = character")
# FIXME does not make sense if A^!B^C=D ??
def _get_sign(self):
# if we have an AND gate, for instance A^B=C
if "!" in self.name and self.and_symbol not in self.name:
return "-1"
else:
return "1"
sign = property(_get_sign, doc="return sign of the reaction")
def _valid_reaction(self, reaction):
reaction = reaction.strip()
reaction = reaction.replace("&", self.and_symbol)
# = sign is compulsory
N = reaction.count("=")
if N != 1:
raise CNOError("Invalid reaction name (only one = character expected. found {0})".format(N))
#
if self._strict_rules:
if reaction[0] in ["=", "^", "+"]:
raise CNOError("Reaction (%s) cannot start with %s" %
(reaction, "=, ^, +"))
#
lhs, rhs = reaction.split("=")
for this in self.valid_symbols:
if this in rhs:
raise CNOError("Found an unexpected character (%s) in the LHS of reactions %s" %
(reaction, self.valid_symbols))
if reaction.startswith("="):
pass
else:
species = re.split("[+|^]", reaction.split("=")[0])
if "" in species:
raise CNOError("Reaction (%s) has two many + or ^ signs" % reaction)
# Finally, a ! must be preceded by either nothing or a special sign but not another species
# e.g. A!B does not make sense.
for i, x in enumerate(species):
if x == "!" and i!=0:
if species[i-1] not in self.valid_symbols:
raise CNOError("Reaction (%s) may be ill-formed with incorrect ! not preceded by another reaction ")
return reaction
def ands2ors(self, reaction):
reaction = Reaction(reaction)
lhs = reaction.get_signed_lhs_species()
rhs = reaction.rhs
reactions = [x + "=" + rhs for x in lhs['+']]
reactions += ["!" + x + "=" + rhs for x in lhs['-']]
return reactions
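    # Hedged illustration (not part of the original class): `ands2ors`
    # decomposes a gate into individual edges, keeping the ! signs, e.g.
    # Reaction("A^!B=C").ands2ors("A^!B=C") returns ['A=C', '!B=C'].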
def sort(self, inplace=True):
"""Rearrange species in alphabetical order
:param bool inplace: defaults to True
::
>>> r = Reaction("F+D^!B+!A=Z")
>>> r.sort()
>>> r
'!A+!B^D+F=Z'
"""
# if only one lhs, nothing to do
if len(self.lhs_species) == 1:
return
# we first need to split + and then ^
splitted_ors = [x for x in self.lhs.split("+")] # left species keeping ! sign
# loop over split list searching for ANDs
species = []
for this in splitted_ors:
species_ands = this.split("^")
# sort the species within the ANDs
species_ands = sorted(species_ands, key=lambda x: x.replace("!", ""))
species_ands = "^".join(species_ands)
species.append(species_ands)
# now sort the ORs
species = sorted(species, key=lambda x: x.replace("!", ""))
# and finally rejoin them
species = "+".join(species)
new_reac = "=".join([species, self.rhs])
if inplace is True:
self.name = new_reac
else:
return new_reac
def simplify(self, inplace=True):
"""Simplifies reaction if possible.
::
>>> r = Reaction("A+A=B")
>>> r.simplify()
>>> r
"A=B"
        Other cases (with ANDs) are not simplified. Even though the truth table of
        **A+A^B=C** could be simplified to **A=C**, we do not simplify it for now.
"""
lhs = "+".join(set(self.lhs.split("+")))
name = "=".join([lhs, self.rhs])
if inplace:
self.name = name
else:
return name
def _rename_one_species(self, lhs, k, v):
symbols = ['+', '^', '!', '=']
new_name = ''
current_species = ''
# LHS
for x in lhs:
# each time there is a symbol found, we will read a new species
if x in symbols:
# the current species should now be added to the new_name
if current_species == k:
new_name += v
else:
new_name += current_species
current_species = ''
new_name += x
else:
current_species += x
# RHS: in principle current_species should be the RHS
if current_species == k:
new_name += v
else:
new_name += current_species
return new_name
def rename_species(self, mapping={}):
for k, v in mapping.items():
self.name = self._rename_one_species(self.name, k, v)
def __repr__(self):
        # __repr__ needs to be overwritten, otherwise the default str
        # representation is used instead of _name. If one calls sort(), the
        # displayed value would then come from the immutable str content
        # rather than the updated _name attribute.
return self._name
def __eq__(self, other):
# The reaction may not be sorted and user may not want it to be sorted,
# so we create a new instance and sort it
r1 = Reaction(self)
r1.sort()
# we also sort the input reaction creating an instance as well so that the input reaction
# (if it is an object) will not be sorted inplace either
r2 = Reaction(other)
r2.sort()
if r1.name == r2.name:
return True
else:
return False
class Reactions(ReactionBase):
"""Data structure to handle list of :class:`Reaction` instances
For the syntax of a reaction, see :class:`Reaction`. You can
use the **=**, **!**, **+** and **^** characters.
Reactions can be added using either string or instances of :class:`Reaction`::
>>> from cno import Reaction, Reactions
>>> r = Reactions()
>>> r.add_reaction("A+B=C") # a OR reaction
>>> r.add_reaction("A^B=C") # an AND reaction
>>> r.add_reaction("A&B=C") # an AND reaction
>>> r.add_reaction("C=D") # an activation
>>> r.add_reaction("!D=E") # a NOT reaction
>>> r.add_reaction(Reaction("F=G")) # a NOT reaction
Now, we can get the species::
>>> r.species
    ['A', 'B', 'C', 'D', 'E', 'F', 'G']
Remove one::
>>> r.remove_species("A")
>>> r.reactions
["B=C", "C=D", "!D=E"]
    .. note:: no simplification is made on reactions. For instance, if you add A=B and then
        A+B=C, A=B is redundant but will be kept.
.. seealso:: :class:`cno.io.reactions.Reaction` and :class:`cno.io.sif.SIF`
"""
def __init__(self, reactions=[], strict_rules=True, verbose=False):
super(Reactions, self).__init__()
self.strict_rules = strict_rules
# !! use a copy
self._reactions = []
self.add_reactions(reactions)
self.verbose = verbose
def to_list(self):
"""Return list of reaction names"""
return [x.name for x in self._reactions]
def _get_species(self):
"""Extract the specID out of reacID"""
# extract species from all reactions and add to a set
species = [this for reaction in self._reactions for this in reaction.species]
species = set(species)
# sort (transformed to a list)
species = sorted(species)
return species
species = property(_get_species, doc="return list of unique species")
def _get_reaction_names(self):
return [reaction.name for reaction in self._reactions]
reactions = property(fget=_get_reaction_names, doc="return list of reaction names")
def __str__(self):
_str = "Reactions() instance:\n"
_str += "- %s reactions\n" % len(self.reactions)
_str += "- %s species\n" % len(self.species)
return _str
def remove_species(self, species_to_remove):
"""Removes species from the list of reactions
:param str,list species_to_remove:
.. note:: If a reaction is "a+b=c" and you remove specy "a",
then the reaction is not enterely removed but replace by "b=c"
"""
# make sure we have a **list** of species to remove
if isinstance(species_to_remove, list):
pass
elif isinstance(species_to_remove, str):
species_to_remove = [species_to_remove]
else:
raise TypeError("species_to_remove must be a list or string")
reacIDs_toremove = []
reacIDs_toadd = []
for reac in self._reactions:
lhs = reac.lhs_species # lhs without ! sign
rhs = reac.rhs
# if RHS contains a species to remove, the entire reaction can be removed
if rhs in species_to_remove:
reacIDs_toremove.append(reac.name)
continue
            # otherwise, we need to look at the LHS. If the LHS is of length 1,
            # we are in the first case (a=b) and if the LHS contains the species
            # to remove, we do not want to keep the reaction.
if len(lhs) == 1:
if lhs[0] in species_to_remove:
reacIDs_toremove.append(reac.name)
continue
# Finally, if LHS contains 2 species or more, separated by + sign,
            # we do not want to remove the entire reaction but only the
# relevant species. So to remove a in "a+b=c", we should return "b=c"
# taking care of ! signs as well.
for symbol in ["+", "^"]:
if symbol not in reac.name:
continue
else:
lhs_with_neg = [x for x in reac.name.split("=")[0].split(symbol)]
new_lhs = symbol.join([x for x in lhs_with_neg if x.replace("!", "") not in species_to_remove])
if len(new_lhs):
new_reac = new_lhs + "=" + rhs
reacIDs_toremove.append(reac.name)
reacIDs_toadd.append(new_reac)
#
for reac in reacIDs_toremove:
self.remove_reaction(reac)
for reac in reacIDs_toadd:
self.add_reaction(reac)
def rename_species(self, mapping={}):
"""Rename species in all reactions
:param dict mapping: The mapping between old and new names
"""
for r in self._reactions:
r.rename_species(mapping)
def add_reactions(self, reactions):
"""Add a list of reactions
:param list reactions: list of reactions or strings
"""
for reac in reactions:
self.add_reaction(Reaction(reac))
def add_reaction(self, reaction):
"""Adds a reaction in the list of reactions
See documentation of the :class:`Reaction` class for details. Here are
some valid reactions::
a=b
a+c=d
a^b=e # same as above
!a=e
Example:
.. doctest::
>>> from cno import Reactions
>>> c = Reactions()
>>> c.add_reaction("a=b")
>>> assert len(c.reactions) == 1
"""
reac = Reaction(reaction, strict_rules=self.strict_rules)
reac.sort()
if reac.name not in self.to_list():
self._reactions.append(reac)
else:
print("Reaction %s already in the list of reactions" % reaction)
def remove_reaction(self, reaction_name):
"""Remove a reaction from the reacID list
>>> c = Reactions()
>>> c.add_reaction("a=b")
>>> assert len(c.reactions) == 1
>>> c.remove_reaction("a=b")
>>> assert len(c.reactions) == 0
"""
names = [x.name for x in self._reactions]
if reaction_name in names:
index2remove = names.index(reaction_name)
del self._reactions[index2remove]
else:
if self.verbose:
print("Reaction {0} not found. Nothing done".format(reaction_name))
def search(self, species, strict=False):
"""Prints and returns reactions that contain the species name
:param str species: name to look for
:param bool strict: decompose reactions to search for the species
:return: a Reactions instance with reactions containing the species to search for
"""
r = Reactions()
for x in self._reactions:
list_species = x.lhs_species
if strict == True:
for this in list_species:
if species.lower() == this.lower():
if self.verbose:
print("Adding {0}".format(x.name))
r.add_reaction(x.name)
else:
for this in list_species:
if species.lower() in this.lower():
if self.verbose:
print("Adding {0}".format(x.name))
r.add_reaction(x.name)
continue
return r
def __len__(self):
return len(self._reactions)
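# Hedged usage sketch (not part of the original module): removing a species
# from an OR reaction keeps the remaining inputs, while plain activations of
# that species are dropped entirely (see Reactions.remove_species).
#
# >>> r = Reactions(["a+b=c", "a=d"])
# >>> r.remove_species("a")
# >>> r.reactions
# ['b=c']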
|
|
'''
Copyright (c) 2011-2014, Agora Games, LLC All rights reserved.
https://github.com/agoragames/haigha/blob/master/LICENSE.txt
'''
from chai import Chai
from collections import deque
from haigha import channel
from haigha.channel import Channel, SyncWrapper
from haigha.exceptions import ChannelError, ChannelClosed, ConnectionClosed
from haigha.classes.basic_class import BasicClass
from haigha.classes.channel_class import ChannelClass
from haigha.classes.exchange_class import ExchangeClass
from haigha.classes.queue_class import QueueClass
from haigha.classes.transaction_class import TransactionClass
from haigha.classes.protocol_class import ProtocolClass
from haigha.frames.method_frame import MethodFrame
from haigha.frames.heartbeat_frame import HeartbeatFrame
from haigha.frames.header_frame import HeaderFrame
from haigha.frames.content_frame import ContentFrame
class SyncWrapperTest(Chai):
def test_init(self):
s = SyncWrapper('cb')
assert_equals('cb', s._cb)
assert_true(s._read)
assert_equals(None, s._result)
def test_eq_when_other_is_same_cb(self):
s = SyncWrapper('cb')
assert_equals('cb', s)
assert_not_equals('bb', s)
def test_eq_when_other_has_same_cb(self):
s = SyncWrapper('cb')
other = SyncWrapper('cb')
another = SyncWrapper('bb')
assert_equals(s, other)
assert_not_equals(s, another)
def test_call(self):
cb = mock()
s = SyncWrapper(cb)
expect(cb).args('foo', 'bar', hello='mars')
s('foo', 'bar', hello='mars')
assert_false(s._read)
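# Hedged sketch (not part of the original test suite): SyncWrapper is used by
# Channel.add_synchronous_cb to block until a broker reply arrives. Calling
# the wrapper delegates to the wrapped callback and flips `_read` to False,
# which is the loop-exit condition exercised in the channel tests below.
def _sync_wrapper_sketch():
    calls = []
    wrapper = SyncWrapper(calls.append)
    wrapper('frame')             # delegates to the callback ...
    return wrapper._read, calls  # ... and returns (False, ['frame'])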
class ChannelTest(Chai):
def test_init(self):
c = Channel('connection', 'id', {
20: ChannelClass,
40: ExchangeClass,
50: QueueClass,
60: BasicClass,
90: TransactionClass,
})
assert_equals('connection', c._connection)
assert_equals('id', c._channel_id)
assert_true(isinstance(c.channel, ChannelClass))
assert_true(isinstance(c.exchange, ExchangeClass))
assert_true(isinstance(c.queue, QueueClass))
assert_true(isinstance(c.basic, BasicClass))
assert_true(isinstance(c.tx, TransactionClass))
assert_false(c._synchronous)
assert_equals(c._class_map[20], c.channel)
assert_equals(c._class_map[40], c.exchange)
assert_equals(c._class_map[50], c.queue)
assert_equals(c._class_map[60], c.basic)
assert_equals(c._class_map[90], c.tx)
assert_equals(deque([]), c._pending_events)
assert_equals(deque([]), c._frame_buffer)
assert_equals(set([]), c._open_listeners)
assert_equals(set([]), c._close_listeners)
assert_false(c._closed)
assert_equals(
{
'reply_code': 0,
'reply_text': 'first connect',
'class_id': 0,
'method_id': 0
}, c._close_info)
assert_true(c._active)
c = Channel('connection', 'id', {
20: ChannelClass,
40: ExchangeClass,
50: QueueClass,
60: BasicClass,
90: TransactionClass,
}, synchronous=True)
assert_true(c._synchronous)
def test_properties(self):
connection = mock()
connection.logger = 'logger'
connection.synchronous = False
c = Channel(connection, 'id', {})
c._closed = 'yes'
c._close_info = 'ithappened'
c._active = 'record'
assert_equals(connection, c.connection)
assert_equals('id', c.channel_id)
assert_equals('logger', c.logger)
assert_equals('yes', c.closed)
assert_equals('ithappened', c.close_info)
assert_equals('record', c.active)
assert_false(c.synchronous)
c._closed = False
assert_equals(None, c.close_info)
connection.synchronous = False
c = Channel(connection, 'id', {}, synchronous=True)
assert_true(c.synchronous)
connection.synchronous = True
c = Channel(connection, 'id', {})
assert_true(c.synchronous)
connection.synchronous = True
c = Channel(connection, 'id', {}, synchronous=False)
assert_true(c.synchronous)
def test_add_open_listener(self):
c = Channel(None, None, {})
c.add_open_listener('foo')
assert_equals(set(['foo']), c._open_listeners)
def test_remove_open_listener(self):
c = Channel(None, None, {})
c.add_open_listener('foo')
c.remove_open_listener('foo')
c.remove_open_listener('bar')
assert_equals(set([]), c._open_listeners)
def test_notify_open_listeners(self):
c = Channel(None, None, {})
cb1 = mock()
cb2 = mock()
c._open_listeners = set([cb1, cb2])
expect(cb1).args(c)
expect(cb2).args(c)
c._notify_open_listeners()
def test_add_close_listener(self):
c = Channel(None, None, {})
c.add_close_listener('foo')
assert_equals(set(['foo']), c._close_listeners)
def test_remove_close_listener(self):
c = Channel(None, None, {})
c.add_close_listener('foo')
c.remove_close_listener('foo')
c.remove_close_listener('bar')
assert_equals(set([]), c._close_listeners)
def test_notify_close_listeners(self):
c = Channel(None, None, {})
cb1 = mock()
cb2 = mock()
c._close_listeners = set([cb1, cb2])
expect(cb1).args(c)
expect(cb2).args(c)
c._notify_close_listeners()
def test_open(self):
c = Channel(None, None, {})
expect(mock(c, 'channel').open)
c.open()
def test_active(self):
c = Channel(None, None, {})
expect(mock(c, 'channel').open)
c.open()
        assert_true(c.active)
def test_close_with_no_args(self):
c = Channel(None, None, {})
expect(mock(c, 'channel').close).args(0, '', 0, 0)
c.close()
def test_close_with_args(self):
c = Channel(None, None, {})
expect(mock(c, 'channel').close).args(1, 'two', 3, 4)
expect(c.channel.close).args(1, 'two', 3, 4)
c.close(1, 'two', 3, 4)
c.close(reply_code=1, reply_text='two', class_id=3, method_id=4)
def test_close_when_channel_attr_cleared(self):
c = Channel(None, None, {})
assert_false(hasattr(c, 'channel'))
c.close()
def test_publish(self):
c = Channel(None, None, {})
expect(mock(c, 'basic').publish).args('arg1', 'arg2', foo='bar')
c.publish('arg1', 'arg2', foo='bar')
def test_publish_synchronous(self):
c = Channel(None, None, {})
expect(mock(c, 'tx').select)
expect(mock(c, 'basic').publish).args('arg1', 'arg2', foo='bar')
expect(c.tx.commit).args(cb='a_cb')
c.publish_synchronous('arg1', 'arg2', foo='bar', cb='a_cb')
def test_dispatch(self):
c = Channel(None, None, {})
frame = mock()
frame.class_id = 32
klass = mock()
c._class_map[32] = klass
expect(klass.dispatch).args(frame)
c.dispatch(frame)
frame.class_id = 33
assert_raises(Channel.InvalidClass, c.dispatch, frame)
def test_buffer_frame(self):
c = Channel(None, None, {})
c.buffer_frame('f1')
c.buffer_frame('f2')
assert_equals(deque(['f1', 'f2']), c._frame_buffer)
def test_process_frames_when_no_frames(self):
# Not that this should ever happen, but to be sure
c = Channel(None, None, {})
stub(c.dispatch)
c.process_frames()
def test_process_frames_stops_when_buffer_is_empty(self):
c = Channel(None, None, {})
f0 = MethodFrame('ch_id', 'c_id', 'm_id')
f1 = MethodFrame('ch_id', 'c_id', 'm_id')
c._frame_buffer = deque([f0, f1])
expect(c.dispatch).args(f0)
expect(c.dispatch).args(f1)
c.process_frames()
assert_equals(deque(), c._frame_buffer)
def test_process_frames_stops_when_frameunderflow_raised(self):
c = Channel(None, None, {})
f0 = MethodFrame('ch_id', 'c_id', 'm_id')
f1 = MethodFrame('ch_id', 'c_id', 'm_id')
c._frame_buffer = deque([f0, f1])
expect(c.dispatch).args(f0).raises(ProtocolClass.FrameUnderflow)
c.process_frames()
assert_equals(f1, c._frame_buffer[0])
def test_process_frames_when_connectionclosed_on_dispatch(self):
c = Channel(None, None, {})
c._connection = mock()
c._connection.logger = mock()
f0 = MethodFrame(20, 30, 40)
f1 = MethodFrame('ch_id', 'c_id', 'm_id')
c._frame_buffer = deque([f0, f1])
expect(c.dispatch).args(f0).raises(
ConnectionClosed('something darkside'))
stub(c.close) # assert not called
assert_raises(ConnectionClosed, c.process_frames)
def test_process_frames_logs_and_closes_when_dispatch_error_raised(self):
c = Channel(None, None, {})
c._connection = mock()
c._connection.logger = mock()
f0 = MethodFrame(20, 30, 40)
f1 = MethodFrame('ch_id', 'c_id', 'm_id')
c._frame_buffer = deque([f0, f1])
expect(c.dispatch).args(f0).raises(RuntimeError("zomg it broked"))
expect(c.close).args(500, 'Failed to dispatch %s' % (str(f0)))
assert_raises(RuntimeError, c.process_frames)
assert_equals(f1, c._frame_buffer[0])
def test_process_frames_logs_and_closes_when_dispatch_error_raised_even_when_exception_on_close(self):
c = Channel(None, None, {})
c._connection = mock()
c._connection.logger = mock()
f0 = MethodFrame(20, 30, 40)
f1 = MethodFrame('ch_id', 'c_id', 'm_id')
c._frame_buffer = deque([f0, f1])
expect(c.dispatch).args(f0).raises(RuntimeError("zomg it broked"))
expect(c.close).raises(ValueError())
assert_raises(RuntimeError, c.process_frames)
assert_equals(f1, c._frame_buffer[0])
def test_process_frames_logs_and_closes_when_systemexit_raised(self):
c = Channel(None, None, {})
c._connection = mock()
c._connection.logger = mock()
f0 = MethodFrame(20, 30, 40)
f1 = MethodFrame('ch_id', 'c_id', 'm_id')
c._frame_buffer = deque([f0, f1])
expect(c.dispatch).args(f0).raises(SystemExit())
stub(c.close)
assert_raises(SystemExit, c.process_frames)
assert_equals(f1, c._frame_buffer[0])
def test_next_frame_with_a_frame(self):
c = Channel(None, None, {})
ch_id, c_id, m_id = 0, 1, 2
f0 = MethodFrame(ch_id, c_id, m_id)
f1 = MethodFrame(ch_id, c_id, m_id)
c._frame_buffer = deque([f0, f1])
assert_equals(c.next_frame(), f0)
def test_next_frame_with_no_frames(self):
c = Channel(None, None, {})
c._frame_buffer = deque()
assert_equals(c.next_frame(), None)
def test_requeue_frames(self):
c = Channel(None, None, {})
ch_id, c_id, m_id = 0, 1, 2
f = [MethodFrame(ch_id, c_id, m_id) for i in xrange(4)]
c._frame_buffer = deque(f[:2])
c.requeue_frames(f[2:])
assert_equals(c._frame_buffer, deque([f[i] for i in [3, 2, 0, 1]]))
def test_send_frame_when_not_closed_no_flow_control_no_pending_events(self):
conn = mock()
c = Channel(conn, 32, {})
expect(conn.send_frame).args('frame')
c.send_frame('frame')
def test_send_frame_when_not_closed_no_flow_control_pending_event(self):
conn = mock()
c = Channel(conn, 32, {})
c._pending_events.append('cb')
c.send_frame('frame')
assert_equals(deque(['cb', 'frame']), c._pending_events)
def test_send_frame_when_not_closed_and_flow_control(self):
conn = mock()
c = Channel(conn, 32, {})
c._active = False
method = MethodFrame(1, 2, 3)
heartbeat = HeartbeatFrame()
header = HeaderFrame(1, 2, 3, 4)
content = ContentFrame(1, 'foo')
expect(conn.send_frame).args(method)
expect(conn.send_frame).args(heartbeat)
c.send_frame(method)
c.send_frame(heartbeat)
assert_raises(Channel.Inactive, c.send_frame, header)
assert_raises(Channel.Inactive, c.send_frame, content)
def test_send_frame_when_closed_for_a_reason(self):
conn = mock()
c = Channel(conn, 32, {})
c._closed = True
c._close_info = {'reply_code': 42, 'reply_text': 'bad'}
assert_raises(ChannelClosed, c.send_frame, 'frame')
def test_send_frame_when_closed_for_no_reason(self):
conn = mock()
c = Channel(conn, 32, {})
c._closed = True
c._close_info = {'reply_code': 42, 'reply_text': ''}
assert_raises(ChannelClosed, c.send_frame, 'frame')
def test_add_synchronous_cb_when_transport_asynchronous(self):
conn = mock()
conn.synchronous = False
c = Channel(conn, None, {})
assert_equals(deque([]), c._pending_events)
c.add_synchronous_cb('foo')
assert_equals(deque(['foo']), c._pending_events)
def test_add_synchronous_cb_when_transport_asynchronous_but_channel_synchronous(self):
conn = mock()
conn.synchronous = False
c = Channel(conn, None, {}, synchronous=True)
wrapper = mock()
wrapper._read = True
wrapper._result = 'done'
expect(channel.SyncWrapper).args('foo').returns(wrapper)
expect(conn.read_frames)
expect(conn.read_frames).side_effect(
lambda: setattr(wrapper, '_read', False))
assert_equals(deque([]), c._pending_events)
assert_equals('done', c.add_synchronous_cb('foo'))
# This is technically cleared in runtime, but assert that it's not cleared
# in this method
assert_equals(deque([wrapper]), c._pending_events)
def test_add_synchronous_cb_when_transport_synchronous(self):
conn = mock()
conn.synchronous = True
c = Channel(conn, None, {})
wrapper = mock()
wrapper._read = True
wrapper._result = 'done'
expect(channel.SyncWrapper).args('foo').returns(wrapper)
expect(conn.read_frames)
expect(conn.read_frames).side_effect(
lambda: setattr(wrapper, '_read', False))
assert_equals(deque([]), c._pending_events)
assert_equals('done', c.add_synchronous_cb('foo'))
# This is technically cleared in runtime, but assert that it's not cleared
# in this method
assert_equals(deque([wrapper]), c._pending_events)
def test_add_synchronous_cb_when_transport_synchronous_and_channel_closes(self):
conn = mock()
conn.synchronous = True
c = Channel(conn, None, {})
wrapper = mock()
wrapper._read = True
wrapper._result = 'done'
expect(channel.SyncWrapper).args('foo').returns(wrapper)
expect(conn.read_frames)
expect(conn.read_frames).side_effect(
lambda: setattr(c, '_closed', True))
with assert_raises(ChannelClosed):
c.add_synchronous_cb('foo')
def test_clear_synchronous_cb_when_no_pending(self):
c = Channel(None, None, {})
stub(c._flush_pending_events)
assert_equals(deque([]), c._pending_events)
assert_equals('foo', c.clear_synchronous_cb('foo'))
def test_clear_synchronous_cb_when_pending_cb_matches(self):
c = Channel(None, None, {})
c._pending_events = deque(['foo'])
expect(c._flush_pending_events)
assert_equals('foo', c.clear_synchronous_cb('foo'))
assert_equals(deque([]), c._pending_events)
def test_clear_synchronous_cb_when_pending_cb_doesnt_match_but_isnt_in_list(self):
c = Channel(None, None, {})
c._pending_events = deque(['foo'])
expect(c._flush_pending_events)
assert_equals('bar', c.clear_synchronous_cb('bar'))
assert_equals(deque(['foo']), c._pending_events)
def test_clear_synchronous_cb_when_pending_cb_doesnt_match_and_is_in_list(self):
c = Channel(None, None, {})
stub(c._flush_pending_events)
c._pending_events = deque(['foo', 'bar'])
assert_raises(ChannelError, c.clear_synchronous_cb, 'bar')
assert_equals(deque(['foo', 'bar']), c._pending_events)
def test_flush_pending_events_flushes_all_leading_frames(self):
conn = mock()
c = Channel(conn, 42, {})
f1 = MethodFrame(1, 2, 3)
f2 = MethodFrame(1, 2, 3)
f3 = MethodFrame(1, 2, 3)
c._pending_events = deque([f1, f2, 'cb', f3])
expect(conn.send_frame).args(f1)
expect(conn.send_frame).args(f2)
c._flush_pending_events()
assert_equals(deque(['cb', f3]), c._pending_events)
def test_closed_cb_without_final_frame(self):
c = Channel('connection', None, {
20: ChannelClass,
40: ExchangeClass,
50: QueueClass,
60: BasicClass,
90: TransactionClass,
})
c._pending_events = 'foo'
c._frame_buffer = 'foo'
for val in c._class_map.values():
expect(val._cleanup)
expect(c._notify_close_listeners)
c._closed_cb()
assert_equals(deque([]), c._pending_events)
assert_equals(deque([]), c._frame_buffer)
assert_equals(None, c._connection)
assert_false(hasattr(c, 'channel'))
assert_false(hasattr(c, 'exchange'))
assert_false(hasattr(c, 'queue'))
assert_false(hasattr(c, 'basic'))
assert_false(hasattr(c, 'tx'))
assert_equals(None, c._class_map)
assert_equals(set(), c._close_listeners)
def test_closed_cb_with_final_frame(self):
conn = mock()
c = Channel(conn, None, {})
expect(conn.send_frame).args('final')
for val in c._class_map.values():
expect(val._cleanup)
c._closed_cb('final')
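# Hedged aside, not part of the original test suite: the ordering asserted in
# test_requeue_frames matches collections.deque.extendleft(), which prepends the
# requeued frames one at a time and therefore reverses their relative order.
def _example_requeue_ordering():
    from collections import deque  # repeated here so the sketch stands alone
    buf = deque(['f0', 'f1'])
    buf.extendleft(['f2', 'f3'])  # prepends 'f2', then 'f3'
    assert buf == deque(['f3', 'f2', 'f0', 'f1'])
    return buf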
|
|
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from fnmatch import fnmatchcase
import os
import re
import stat
import time
import glob
from .config import (Configuration, IntegerEntry, NewlineEntry, StringEntry,
TimeEntry)
class SSHClientException(RuntimeError):
pass
class _ClientConfiguration(Configuration):
def __init__(self, host, alias, port, timeout, newline, prompt, term_type,
width, height, path_separator, encoding):
super(_ClientConfiguration, self).__init__(
index=IntegerEntry(None),
host=StringEntry(host),
alias=StringEntry(alias),
port=IntegerEntry(port),
timeout=TimeEntry(timeout),
newline=NewlineEntry(newline),
prompt=StringEntry(prompt),
term_type=StringEntry(term_type),
width=IntegerEntry(width),
height=IntegerEntry(height),
path_separator=StringEntry(path_separator),
encoding=StringEntry(encoding)
)
class AbstractSSHClient(object):
"""Base class for the SSH client implementation.
This class defines the public API. Subclasses
(:py:class:`pythonclient.PythonSSHClient` and
:py:class:`javaclient.JavaSSHClient`) provide the language specific
concrete implementations.
"""
def __init__(self, host, alias=None, port=22, timeout=3, newline='LF',
prompt=None, term_type='vt100', width=80, height=24,
path_separator='/', encoding='utf8'):
self.config = _ClientConfiguration(host, alias, port, timeout, newline,
prompt, term_type, width, height,
path_separator, encoding)
self._sftp_client = None
self._shell = None
self._started_commands = []
self.client = self._get_client()
def _get_client(self):
raise NotImplementedError('This should be implemented in the subclass.')
@staticmethod
def enable_logging(path):
"""Enables logging of SSH events to a file.
:param str path: Path to the file the log is written to.
:returns: `True`, if logging was successfully enabled. False otherwise.
"""
raise NotImplementedError
@property
def sftp_client(self):
"""Gets the SSH client for the connection.
:returns: An object of the class that inherits from
:py:class:`AbstractSFTPClient`.
"""
if not self._sftp_client:
self._sftp_client = self._create_sftp_client()
return self._sftp_client
@property
def shell(self):
"""Gets the shell for the connection.
:returns: An object of the class that inherits from
:py:class:`AbstractShell`.
"""
if not self._shell:
self._shell = self._create_shell()
return self._shell
def _create_sftp_client(self):
raise NotImplementedError
def _create_shell(self):
raise NotImplementedError
def close(self):
"""Closes the connection."""
self._shell = None
self.client.close()
def login(self, username, password, delay=None):
"""Logs into the remote host using password authentication.
This method reads the output from the remote host after logging in,
thus clearing the output. If prompt is set, everything until the prompt
is read (using :py:meth:`read_until_prompt` internally).
Otherwise everything on the output is read with the specified `delay`
(using :py:meth:`read` internally).
:param str username: Username to log in with.
:param str password: Password for the `username`.
:param str delay: The `delay` passed to :py:meth:`read` for reading
the output after logging in. The delay is only effective if
the prompt is not set.
:raises SSHClientException: If logging in failed.
:returns: The read output from the server.
"""
username = self._encode(username)
password = self._encode(password)
try:
self._login(username, password)
except SSHClientException:
raise SSHClientException("Authentication failed for user '%s'."
% username)
return self._read_login_output(delay)
def _encode(self, text):
if isinstance(text, str):
return text
if not isinstance(text, basestring):
text = unicode(text)
return text.encode(self.config.encoding)
def _login(self, username, password):
raise NotImplementedError
def _read_login_output(self, delay):
if self.config.prompt:
return self.read_until_prompt()
return self.read(delay)
def login_with_public_key(self, username, keyfile, password, delay=None):
"""Logs into the remote host using the public key authentication.
This method reads the output from the remote host after logging in,
thus clearing the output. If prompt is set, everything until the prompt
is read (using :py:meth:`read_until_prompt` internally).
Otherwise everything on the output is read with the specified `delay`
(using :py:meth:`read` internally).
:param str username: Username to log in with.
:param str keyfile: Path to the valid OpenSSH private key file.
:param str password: Password (if needed) for unlocking the `keyfile`.
:param str delay: The `delay` passed to :py:meth:`read` for reading
the output after logging in. The delay is only effective if
the prompt is not set.
:raises SSHClientException: If logging in failed.
:returns: The read output from the server.
"""
username = self._encode(username)
self._verify_key_file(keyfile)
try:
self._login_with_public_key(username, keyfile, password)
except SSHClientException:
raise SSHClientException("Login with public key failed for user "
"'%s'." % username)
return self._read_login_output(delay)
def _verify_key_file(self, keyfile):
if not os.path.exists(keyfile):
raise SSHClientException("Given key file '%s' does not exist." %
keyfile)
try:
open(keyfile).close()
except IOError:
raise SSHClientException("Could not read key file '%s'." % keyfile)
def _login_with_public_key(self, username, keyfile, password):
raise NotImplementedError
def execute_command(self, command):
"""Executes the `command` on the remote host.
This method waits until the output triggered by the execution of the
`command` is available and then returns it.
The `command` is always executed in a new shell, meaning that changes to
the environment are not visible to the subsequent calls of this method.
:param str command: The command to be executed on the remote host.
:returns: A 3-tuple (stdout, stderr, return_code) with values
`stdout` and `stderr` as strings and `return_code` as an integer.
"""
self.start_command(command)
return self.read_command_output()
def start_command(self, command):
"""Starts the execution of the `command` on the remote host.
The started `command` is pushed into an internal stack. This stack
always has the latest started `command` on top of it.
The `command` is always started in a new shell, meaning that changes to
the environment are not visible to the subsequent calls of this method.
This method does not return anything. Use :py:meth:`read_command_output`
to get the output of the previously started command.
:param str command: The command to be started on the remote host.
"""
command = self._encode(command)
self._started_commands.append(self._start_command(command))
def _start_command(self, command):
raise NotImplementedError
def read_command_output(self):
"""Reads the output of the previous started command.
The previous started command, started with :py:meth:`start_command`,
is popped out of the stack and its outputs (stdout, stderr and the
return code) are read and returned.
:raises SSHClientException: If there are no started commands to read
output from.
:returns: A 3-tuple (stdout, stderr, return_code) with values
`stdout` and `stderr` as strings and `return_code` as an integer.
"""
try:
return self._started_commands.pop().read_outputs()
except IndexError:
raise SSHClientException('No started commands to read output from.')
def write(self, text, add_newline=False):
"""Writes `text` in the current shell.
:param str text: The text to be written.
:param bool add_newline: If `True`, the configured newline will be
appended to the `text` before writing it on the remote host.
The newline is set when calling :py:meth:`open_connection`.
"""
text = self._encode(text)
if add_newline:
text += self.config.newline
self.shell.write(text)
def read(self, delay=None):
"""Reads all output available in the current shell.
Reading always consumes the output, meaning that after being read,
the read content is no longer present in the output.
:param str delay: If given, this method reads again after the delay
to see if more output has become available. This wait-read cycle is
repeated until a read returns nothing or the configured timeout
expires. The timeout is set when calling
:py:meth:`open_connection`. The delay can be given as an integer
(the number of seconds) or in Robot Framework's time format, e.g.
`4.5s`, `3 minutes`, `2 min 3 sec`.
:returns: The read output from the remote host.
"""
output = self.shell.read()
if delay:
output += self._delayed_read(delay)
return self._decode(output)
def _decode(self, output):
return output.decode(self.config.encoding)
def _delayed_read(self, delay):
delay = TimeEntry(delay).value
max_time = time.time() + self.config.get('timeout').value
output = ''
while time.time() < max_time:
time.sleep(delay)
read = self.shell.read()
if not read:
break
output += read
return output
def read_char(self):
"""Reads a single char from the current shell.
Reading always consumes the output, meaning that after being read,
the read content is no longer present in the output.
:returns: A single char read from the output.
"""
server_output = ''
while True:
try:
server_output += self.shell.read_byte()
return self._decode(server_output)
except UnicodeDecodeError:
pass
def read_until(self, expected):
"""Reads output from the current shell until the `expected` text is
encountered or the timeout expires.
The timeout is set when calling :py:meth:`open_connection`.
Reading always consumes the output, meaning that after being read,
the read content is no longer present in the output.
:param str expected: The text to look for in the output.
:raises SSHClientException: If `expected` is not found in the output
when the timeout expires.
:returns: The read output, including the encountered `expected` text.
"""
expected = self._encode(expected)
return self._read_until(lambda s: expected in s, expected)
def _read_until(self, matcher, expected, timeout=None):
output = ''
timeout = TimeEntry(timeout) if timeout else self.config.get('timeout')
max_time = time.time() + timeout.value
while time.time() < max_time:
output += self.read_char()
if matcher(output):
return output
raise SSHClientException("No match found for '%s' in %s\nOutput:\n%s."
% (expected, timeout, output))
def read_until_newline(self):
"""Reads output from the current shell until a newline character is
encountered or the timeout expires.
The newline character and the timeout are set when calling
:py:meth:`open_connection`.
Reading always consumes the output, meaning that after being read,
the read content is no longer present in the output.
:raises SSHClientException: If the newline character is not found in the
output when the timeout expires.
:returns: The read output, including the encountered newline character.
"""
return self.read_until(self.config.newline)
def read_until_prompt(self):
"""Reads output from the current shell until the prompt is encountered
or the timeout expires.
The prompt and timeout are set when calling :py:meth:`open_connection`.
Reading always consumes the output, meaning that after being read,
the read content is no longer present in the output.
:raises SSHClientException: If prompt is not set or is not found
in the output when the timeout expires.
:returns: The read output, including the encountered prompt.
"""
if not self.config.prompt:
raise SSHClientException('Prompt is not set.')
return self.read_until(self.config.prompt)
def read_until_regexp(self, regexp):
"""Reads output from the current shell until the `regexp` matches or
the timeout expires.
The timeout is set when calling :py:meth:`open_connection`.
Reading always consumes the output, meaning that after being read,
the read content is no longer present in the output.
:param regexp: Either the regular expression as a string or a compiled
Regex object.
:raises SSHClientException: If no match against `regexp` is found when
the timeout expires.
:returns: The read output up and until the `regexp` matches.
"""
if isinstance(regexp, basestring):
regexp = re.compile(self._encode(regexp))
return self._read_until(lambda s: regexp.search(s), regexp.pattern)
def write_until_expected(self, text, expected, timeout, interval):
"""Writes `text` repeatedly in the current shell until the `expected`
appears in the output or the `timeout` expires.
:param str text: Text to be written. Uses :py:meth:`write` internally
without appending a newline, so no newline character is added to the
written text.
:param str expected: Text to look for in the output.
:param int timeout: The timeout during which `expected` must appear
in the output. Can be given as an integer (the number of seconds)
or in Robot Framework's time format, e.g. `4.5s`, `3 minutes`,
`2 min 3 sec`.
:param int interval: Time to wait between the repeated writings of
`text`.
:raises SSHClientException: If `expected` is not found in the output
before the `timeout` expires.
:returns: The read output, including the encountered `expected` text.
"""
expected = self._encode(expected)
interval = TimeEntry(interval)
timeout = TimeEntry(timeout)
max_time = time.time() + timeout.value
while time.time() < max_time:
self.write(text)
try:
return self._read_until(lambda s: expected in s, expected,
timeout=interval.value)
except SSHClientException:
pass
raise SSHClientException("No match found for '%s' in %s."
% (expected, timeout))
def put_file(self, source, destination='.', mode='0744', newline='',
path_separator=''):
"""Calls :py:meth:`AbstractSFTPClient.put_file` with the given
arguments.
If `path_separator` is empty, the connection specific path separator,
which is set when calling :py:meth:`open_connection`, is used instead.
This is due to backward compatibility as `path_separator` was moved
to a connection specific setting in SSHLibrary 2.0.
See :py:meth:`AbstractSFTPClient.put_file` for more documentation.
"""
# TODO: Remove deprecated path_separator in SSHLibrary 2.1.
path_separator = path_separator or self.config.path_separator
return self.sftp_client.put_file(source, destination, mode, newline,
path_separator)
def put_directory(self, source, destination='.', mode='0744', newline='',
recursive=False):
"""Calls :py:meth:`AbstractSFTPClient.put_directory` with the given
arguments and the connection specific path separator.
The connection specific path separator is set when calling
:py:meth:`open_connection`.
See :py:meth:`AbstractSFTPClient.put_directory` for more documentation.
"""
return self.sftp_client.put_directory(source, destination, mode,
newline,
self.config.path_separator,
recursive)
def get_file(self, source, destination='.', path_separator=''):
"""Calls :py:meth:`AbstractSFTPClient.get_file` with the given
arguments.
If `path_separator` is empty, the connection specific path separator,
which is set when calling :py:meth:`open_connection`, is used instead.
This is due to backward compatibility as `path_separator` was moved
to a connection specific setting in SSHLibrary 2.0.
See :py:meth:`AbstractSFTPClient.get_file` for more documentation.
"""
# TODO: Remove deprecated path_separator in SSHLibrary 2.1.
path_separator = path_separator or self.config.path_separator
return self.sftp_client.get_file(source, destination, path_separator)
def get_directory(self, source, destination='.', recursive=False):
"""Calls :py:meth:`AbstractSFTPClient.get_directory` with the given
arguments and the connection specific path separator.
The connection specific path separator is set when calling
:py:meth:`open_connection`.
See :py:meth:`AbstractSFTPClient.get_directory` for more documentation.
"""
return self.sftp_client.get_directory(source, destination,
self.config.path_separator,
recursive)
def list_dir(self, path, pattern=None, absolute=False):
"""Calls :py:meth:`.AbstractSFTPClient.list_dir` with the given
arguments.
See :py:meth:`AbstractSFTPClient.list_dir` for more documentation.
:returns: A sorted list of items returned by
:py:meth:`AbstractSFTPClient.list_dir`.
"""
items = self.sftp_client.list_dir(path, pattern, absolute)
return sorted(items)
def list_files_in_dir(self, path, pattern=None, absolute=False):
"""Calls :py:meth:`AbstractSFTPClient.list_files_in_dir` with the given
arguments.
See :py:meth:`AbstractSFTPClient.list_files_in_dir` for more documentation.
:returns: A sorted list of items returned by
:py:meth:`AbstractSFTPClient.list_files_in_dir`.
"""
files = self.sftp_client.list_files_in_dir(path, pattern, absolute)
return sorted(files)
def list_dirs_in_dir(self, path, pattern=None, absolute=False):
"""Calls :py:meth:`AbstractSFTPClient.list_dirs_in_dir` with the given
arguments.
See :py:meth:`AbstractSFTPClient.list_dirs_in_dir` for more documentation.
:returns: A sorted list of items returned by
:py:meth:`AbstractSFTPClient.list_dirs_in_dir`.
"""
dirs = self.sftp_client.list_dirs_in_dir(path, pattern, absolute)
return sorted(dirs)
def is_dir(self, path):
"""Calls :py:meth:`AbstractSFTPClient.is_dir` with the given `path`.
See :py:meth:`AbstractSFTPClient.is_dir` for more documentation.
"""
return self.sftp_client.is_dir(path)
def is_file(self, path):
"""Calls :py:meth:`AbstractSFTPClient.is_file` with the given `path`.
See :py:meth:`AbstractSFTPClient.is_file` for more documentation.
"""
return self.sftp_client.is_file(path)
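# Hedged usage sketch, not part of the original module: it only exercises the
# public API documented above on a concrete AbstractSSHClient subclass supplied
# by the caller; the command and marker strings are arbitrary illustrations.
def _example_session(client, username, password):
    client.login(username, password)                 # reads and discards the login output
    stdout, stderr, rc = client.execute_command('uname -a')
    client.write('echo done', add_newline=True)      # appends the configured newline
    output = client.read_until('done')               # consumes output up to the match
    client.close()
    return stdout, rc, output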
class AbstractShell(object):
"""Base class for the shell implementation.
Classes derived from this class (i.e. :py:class:`pythonclient.Shell`
and :py:class:`javaclient.Shell`) provide the concrete and the language
specific implementations for reading and writing in a shell session.
"""
def read(self):
"""Reads all the output from the shell.
:returns: The read output.
"""
raise NotImplementedError
def read_byte(self):
"""Reads a single byte from the shell.
:returns: The read byte.
"""
raise NotImplementedError
def write(self, text):
"""Writes the `text` in the current shell.
:param str text: The text to be written. No newline characters are
appended automatically to the written text by this method.
"""
raise NotImplementedError
class AbstractSFTPClient(object):
"""Base class for the SFTP implementation.
Classes derived from this class (i.e. :py:class:`pythonclient.SFTPClient`
and :py:class:`javaclient.SFTPClient`) provide the concrete and the language
specific implementations for getting, putting and listing files and
directories.
"""
def __init__(self):
self._homedir = self._absolute_path('.')
def _absolute_path(self, path):
raise NotImplementedError
def is_file(self, path):
"""Checks if the `path` points to a regular file on the remote host.
If the `path` is a symlink, its destination is checked instead.
:param str path: The path to check.
:returns: `True`, if the `path` points to an existing regular file.
False otherwise.
"""
try:
item = self._stat(path)
except IOError:
return False
return item.is_regular()
def _stat(self, path):
raise NotImplementedError
def is_dir(self, path):
"""Checks if the `path` points to a directory on the remote host.
If the `path` is a symlink, its destination is checked instead.
:param str path: The path to check.
:returns: `True`, if the `path` points to an existing directory.
False otherwise.
"""
try:
item = self._stat(path)
except IOError:
return False
return item.is_directory()
def list_dir(self, path, pattern=None, absolute=False):
"""Gets the item names, or optionally the absolute paths, on the given
`path` on the remote host.
This includes regular files, directories as well as other file types,
e.g. device files.
:param str path: The path on the remote host to list.
:param str pattern: If given, only the item names that match
the given pattern are returned. Note that the `pattern` is never
matched against the full path, even if `absolute` is set to
`True`.
:param bool absolute: If `True`, the absolute paths of the items are
returned instead of the item names.
:returns: A list containing either the item names or the absolute
paths. In both cases, the list is first filtered by the `pattern`
if it is given.
"""
return self._list_filtered(path, self._get_item_names, pattern,
absolute)
def _list_filtered(self, path, filter_method, pattern=None, absolute=False):
self._verify_remote_dir_exists(path)
items = filter_method(path)
if pattern:
items = self._filter_by_pattern(items, pattern)
if absolute:
items = self._include_absolute_path(items, path)
return items
def _verify_remote_dir_exists(self, path):
if not self.is_dir(path):
raise SSHClientException("There was no directory matching '%s'." %
path)
def _get_item_names(self, path):
return [item.name for item in self._list(path)]
def _list(self, path):
raise NotImplementedError
def _filter_by_pattern(self, items, pattern):
return [name for name in items if fnmatchcase(name, pattern)]
def _include_absolute_path(self, items, path):
absolute_path = self._absolute_path(path)
if absolute_path[1:3] == ':\\':
absolute_path += '\\'
else:
absolute_path += '/'
return [absolute_path + name for name in items]
def list_files_in_dir(self, path, pattern=None, absolute=False):
"""Gets the file names, or optionally the absolute paths, of the regular
files on the given `path` on the remote host.
:param str path: The path on the remote host to list.
:param str pattern: If given, only the file names that match
the given pattern are returned. Note that the `pattern` is never
matched against the full path, even if `absolute` is set to
`True`.
:param bool absolute: If `True`, the absolute paths of the regular files
are returned instead of the file names.
:returns: A list containing either the regular file names or the absolute
paths. In both cases, the list is first filtered by the `pattern`
if it is given.
"""
return self._list_filtered(path, self._get_file_names, pattern,
absolute)
def _get_file_names(self, path):
return [item.name for item in self._list(path) if item.is_regular()]
def list_dirs_in_dir(self, path, pattern=None, absolute=False):
"""Gets the directory names, or optionally the absolute paths, on the
given `path` on the remote host.
:param str path: The path on the remote host to list.
:param str pattern: If given, only the directory names that match
the given pattern are returned. Note that the `pattern` is never
matched against the full path, even if `absolute` is set to
`True`.
:param bool absolute: If `True`, the absolute paths of the directories
are returned instead of the directory names.
:returns: A list containing either the directory names or the absolute
paths. In both cases, the list is first filtered by the `pattern`
if it is given.
"""
return self._list_filtered(path, self._get_directory_names, pattern,
absolute)
def _get_directory_names(self, path):
return [item.name for item in self._list(path) if item.is_directory()]
def get_directory(self, source, destination, path_separator='/',
recursive=False):
"""Downloads directory(-ies) from the remote host to the local machine,
optionally with subdirectories included.
:param str source: The path to the directory on the remote machine.
:param str destination: The target path on the local machine.
The destination defaults to the current local working directory.
:param str path_separator: The path separator used for joining the
paths on the remote host. On Windows, this must be set as `\`.
The default is `/`, which is also the default on Linux-like systems.
:param bool recursive: If `True`, the subdirectories in the `source`
path are downloaded as well.
:returns: A list of 2-tuples for all the downloaded files. These tuples
contain the remote path as the first value and the local target
path as the second.
"""
source = self._remove_ending_path_separator(path_separator, source)
self._verify_remote_dir_exists(source)
files = []
items = self.list_dir(source)
if items:
for item in items:
remote = source + path_separator + item
local = os.path.join(destination, item)
if self.is_file(remote):
files += self.get_file(remote, local)
elif recursive:
files += self.get_directory(remote, local, path_separator,
recursive)
else:
os.makedirs(destination)
files.append((source, destination))
return files
def _remove_ending_path_separator(self, path_separator, source):
if source.endswith(path_separator):
source = source[:-len(path_separator)]
return source
def get_file(self, source, destination, path_separator='/'):
"""Downloads file(s) from the remote host to the local machine.
:param str source: The path to the file on the remote machine.
Glob patterns, like '*' and '?', can be used in the source, in
which case all the matching files are downloaded.
:param str destination: The target path on the local machine.
If many files are downloaded, e.g. patterns are used in the
`source`, then this must be a path to an existing directory.
The destination defaults to the current local working directory.
:param str path_separator: The path separator used for joining the
paths on the remote host. On Windows, this must be set as `\`.
The default is `/`, which is also the default on Linux-like systems.
:returns: A list of 2-tuples for all the downloaded files. These tuples
contain the remote path as the first value and the local target
path as the second.
"""
remote_files = self._get_get_file_sources(source, path_separator)
if not remote_files:
msg = "There were no source files matching '%s'." % source
raise SSHClientException(msg)
local_files = self._get_get_file_destinations(remote_files, destination)
files = zip(remote_files, local_files)
for src, dst in files:
self._get_file(src, dst)
return files
def _get_get_file_sources(self, source, path_separator):
if path_separator in source:
path, pattern = source.rsplit(path_separator, 1)
else:
path, pattern = '', source
if not path:
path = '.'
return [filename for filename in
self.list_files_in_dir(path, pattern, absolute=True)]
def _get_get_file_destinations(self, source_files, destination):
target_is_dir = destination.endswith(os.sep) or destination == '.'
if not target_is_dir and len(source_files) > 1:
raise SSHClientException('Cannot copy multiple source files to one '
'destination file.')
destination = os.path.abspath(destination.replace('/', os.sep))
self._create_missing_local_dirs(destination, target_is_dir)
if target_is_dir:
return [os.path.join(destination, os.path.basename(name))
for name in source_files]
return [destination]
def _create_missing_local_dirs(self, destination, target_is_dir):
if not target_is_dir:
destination = os.path.dirname(destination)
if not os.path.exists(destination):
os.makedirs(destination)
def _get_file(self, source, destination):
raise NotImplementedError
def put_directory(self, source, destination, mode, newline,
path_separator='/', recursive=False):
"""Uploads directory(-ies) from the local machine to the remote host,
optionally with subdirectories included.
:param str source: The path to the directory on the local machine.
:param str destination: The target path on the remote host.
The destination defaults to the user's home at the remote host.
:param str mode: The uploaded files on the remote host are created with
these modes. The modes are given as traditional Unix octal
permissions, such as '0600'.
:param str newline: If given, the newline characters of the uploaded
files on the remote host are converted to this.
:param str path_separator: The path separator used for joining the
paths on the remote host. On Windows, this must be set as `\`.
The default is `/`, which is also the default on Linux-like systems.
:param bool recursive: If `True`, the subdirectories in the `source`
path are uploaded as well.
:returns: A list of 2-tuples for all the uploaded files. These tuples
contain the local path as the first value and the remote target
path as the second.
"""
self._verify_local_dir_exists(source)
destination = self._remove_ending_path_separator(path_separator,
destination)
if self.is_dir(destination):
destination = destination + path_separator +\
source.rsplit(os.path.sep)[-1]
return self._put_directory(source, destination, mode, newline,
path_separator, recursive)
def _put_directory(self, source, destination, mode, newline,
path_separator, recursive):
files = []
items = os.listdir(source)
if items:
for item in items:
local_path = os.path.join(source, item)
remote_path = destination + path_separator + item
if os.path.isfile(local_path):
files += self.put_file(local_path, remote_path, mode,
newline, path_separator)
elif recursive and os.path.isdir(local_path):
files += self._put_directory(local_path, remote_path, mode,
newline, path_separator,
recursive)
else:
self._create_missing_remote_path(destination)
files.append((source, destination))
return files
def _verify_local_dir_exists(self, path):
if not os.path.isdir(path):
raise SSHClientException("There was no source path matching '%s'."
% path)
def put_file(self, sources, destination, mode, newline, path_separator='/'):
"""Uploads the file(s) from the local machine to the remote host.
:param str sources: The path to the file on the local machine.
Glob patterns, like '*' and '?', can be used in the source, in
which case all the matching files are uploaded.
:param str destination: The target path on the remote host.
If multiple files are uploaded, e.g. patterns are used in the
`source`, then this must be a path to an existing directory.
The destination defaults to the user's home at the remote host.
:param str mode: The uploaded files on the remote host are created with
these modes. The modes are given as traditional Unix octal
permissions, such as '0600'.
:param str newline: If given, the newline characters of the uploaded
files on the remote host are converted to this.
:param str path_separator: The path separator used for joining the
paths on the remote host. On Windows, this must be set as `\`.
The default is `/`, which is also the default on Linux-like systems.
:returns: A list of 2-tuples for all the uploaded files. These tuples
contain the local path as the first value and the remote target
path as the second.
"""
mode = int(mode, 8)
newline = {'CRLF': '\r\n', 'LF': '\n'}.get(newline.upper(), None)
local_files = self._get_put_file_sources(sources)
remote_files, remote_dir = self._get_put_file_destinations(local_files,
destination,
path_separator)
self._create_missing_remote_path(remote_dir)
files = zip(local_files, remote_files)
for source, destination in files:
self._put_file(source, destination, mode, newline)
return files
def _get_put_file_sources(self, source):
sources = [f for f in glob.glob(source.replace('/', os.sep))
if os.path.isfile(f)]
if not sources:
msg = "There are no source files matching '%s'." % source
raise SSHClientException(msg)
return sources
def _get_put_file_destinations(self, sources, destination, path_separator):
destination = destination.split(':')[-1].replace('\\', '/')
if destination == '.':
destination = self._homedir + '/'
if len(sources) > 1 and destination[-1] != '/' and not self.is_dir(destination):
raise ValueError('It is not possible to copy multiple source '
'files to one destination file.')
dir_path, filename = self._parse_path_elements(destination,
path_separator)
if filename:
files = [path_separator.join([dir_path, filename])]
else:
files = [path_separator.join([dir_path, os.path.basename(path)])
for path in sources]
return files, dir_path
def _parse_path_elements(self, destination, path_separator):
def _isabs(path):
if path.startswith(path_separator):
return True
if path_separator == '\\' and path[1:3] == ':\\':
return True
return False
if not _isabs(destination):
destination = path_separator.join([self._homedir, destination])
if self.is_dir(destination):
return destination, ''
return destination.rsplit(path_separator, 1)
def _create_missing_remote_path(self, path):
if path.startswith('/'):
current_dir = '/'
else:
current_dir = self._absolute_path('.')
for dir_name in path.split('/'):
if dir_name:
current_dir = '%s/%s' % (current_dir, dir_name)
try:
self._client.stat(current_dir)
except:
self._client.mkdir(current_dir, 0744)
def _put_file(self, source, destination, mode, newline):
remote_file = self._create_remote_file(destination, mode)
with open(source, 'rb') as local_file:
position = 0
while True:
data = local_file.read(4096)
if not data:
break
if newline and '\n' in data:
data = data.replace('\n', newline)
self._write_to_remote_file(remote_file, data, position)
position += len(data)
self._close_remote_file(remote_file)
def _create_remote_file(self, destination, mode):
raise NotImplementedError
def _write_to_remote_file(self, remote_file, data, position):
raise NotImplementedError
def _close_remote_file(self, remote_file):
raise NotImplementedError
class AbstractCommand(object):
"""Base class for the remote command.
Classes derived from this class (i.e. :py:class:`pythonclient.RemoteCommand`
and :py:class:`javaclient.RemoteCommand`) provide the concrete and the
language specific implementations for running the command on the remote
host.
"""
def __init__(self, command, encoding):
self._command = command
self._encoding = encoding
self._shell = None
def run_in(self, shell):
"""Runs this command in the given `shell`.
:param shell: A shell in the already open connection.
"""
self._shell = shell
self._execute()
def _execute(self):
raise NotImplementedError
def read_outputs(self):
"""Returns the outputs of this command.
:returns: A 3-tuple (stdout, stderr, return_code) with values
`stdout` and `stderr` as strings and `return_code` as an integer.
"""
raise NotImplementedError
class SFTPFileInfo(object):
"""Wrapper class for the language specific file information objects.
Returned by the concrete SFTP client implementations.
"""
def __init__(self, name, mode):
self.name = name
self.mode = mode
def is_regular(self):
"""Checks if this file is a regular file.
:returns: `True`, if the file is a regular file. False otherwise.
"""
return stat.S_ISREG(self.mode)
def is_directory(self):
"""Checks if this file is a directory.
:returns: `True`, if the file is a directory. False otherwise.
"""
return stat.S_ISDIR(self.mode)
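# Hedged sketch, not part of the original module: SFTPFileInfo only wraps a name
# and a raw st_mode, so instances can be built with the standard `stat` constants
# when testing code that consumes them. The file names below are made up.
def _example_file_info():
    regular = SFTPFileInfo('notes.txt', stat.S_IFREG | 0644)
    directory = SFTPFileInfo('logs', stat.S_IFDIR | 0755)
    assert regular.is_regular() and not regular.is_directory()
    assert directory.is_directory() and not directory.is_regular()
    return regular, directory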
|
|
import json
import pytz
from datetime import datetime
from sqlalchemy import schema, types
from sqlalchemy.orm import relationship, validates
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects import mysql
__all__ = ['Base', 'ContentBlock', 'DataCollection', 'Service',
'InboxMessage', 'ResultSet', 'Subscription']
Base = declarative_base(name='Model')
MYSQL_LARGE_BINARY = mysql.MEDIUMBLOB()
def get_utc_now():
return datetime.utcnow().replace(tzinfo=pytz.UTC)
class AbstractModel(Base):
__abstract__ = True
date_created = schema.Column(
types.DateTime(timezone=True), default=get_utc_now)
collection_to_content_block = schema.Table(
'collection_to_content_block',
Base.metadata,
schema.Column(
'collection_id',
types.Integer,
schema.ForeignKey('data_collections.id', ondelete='CASCADE')),
schema.Column(
'content_block_id',
types.Integer,
schema.ForeignKey('content_blocks.id', ondelete='CASCADE'),
index=True),
schema.PrimaryKeyConstraint('collection_id', 'content_block_id')
)
class ContentBlock(AbstractModel):
__tablename__ = 'content_blocks'
id = schema.Column(types.Integer, primary_key=True)
message = schema.Column(types.Text, nullable=True)
timestamp_label = schema.Column(
types.DateTime(timezone=True),
default=get_utc_now, index=True)
inbox_message_id = schema.Column(
types.Integer,
schema.ForeignKey(
'inbox_messages.id', onupdate='CASCADE', ondelete='CASCADE'),
nullable=True)
content_type = types.LargeBinary().with_variant(MYSQL_LARGE_BINARY, 'mysql')
content = schema.Column(content_type, nullable=False)
binding_id = schema.Column(types.String(300), index=True)
binding_subtype = schema.Column(types.String(300), index=True)
collections = relationship(
'DataCollection',
secondary=collection_to_content_block,
backref='content_blocks',
lazy='dynamic')
@validates('collections', include_removes=True, include_backrefs=True)
def _update_volume(self, key, collection, is_remove):
if is_remove:
collection.volume = collection.__class__.volume - 1
else:
collection.volume = collection.__class__.volume + 1
return collection
def __repr__(self):
return ('ContentBlock(id={obj.id}, '
'inbox_message={obj.inbox_message_id}, '
'binding={obj.binding_subtype})').format(obj=self)
service_to_collection = schema.Table(
'service_to_collection',
Base.metadata,
schema.Column(
'service_id',
types.String(150),
schema.ForeignKey('services.id', ondelete='CASCADE')),
schema.Column(
'collection_id',
types.Integer,
schema.ForeignKey('data_collections.id', ondelete='CASCADE')),
schema.PrimaryKeyConstraint('service_id', 'collection_id')
)
class Service(AbstractModel):
__tablename__ = 'services'
id = schema.Column(types.String(150), primary_key=True)
type = schema.Column(types.String(150))
_properties = schema.Column(types.Text, nullable=False)
collections = relationship(
'DataCollection',
secondary=service_to_collection,
backref='services')
date_updated = schema.Column(
types.DateTime(timezone=True), default=get_utc_now)
@property
def properties(self):
return json.loads(self._properties)
@properties.setter
def properties(self, properties):
self._properties = json.dumps(properties)
class DataCollection(AbstractModel):
__tablename__ = 'data_collections'
id = schema.Column(types.Integer, primary_key=True)
name = schema.Column(types.String(300), index=True, unique=True)
type = schema.Column(types.String(150))
description = schema.Column(types.Text, nullable=True)
accept_all_content = schema.Column(types.Boolean, default=False)
bindings = schema.Column(types.Text)
available = schema.Column(types.Boolean, default=True)
volume = schema.Column(types.Integer, default=0)
def __repr__(self):
return ('DataCollection(name={obj.name}, type={obj.type})'
.format(obj=self))
class InboxMessage(AbstractModel):
__tablename__ = 'inbox_messages'
id = schema.Column(types.Integer, primary_key=True)
message_id = schema.Column(types.Text)
result_id = schema.Column(types.Text, nullable=True)
record_count = schema.Column(types.Integer, nullable=True)
partial_count = schema.Column(types.Boolean, default=False)
subscription_collection_name = schema.Column(types.Text, nullable=True)
subscription_id = schema.Column(types.Text, nullable=True)
exclusive_begin_timestamp_label = schema.Column(
types.DateTime(timezone=True), nullable=True)
inclusive_end_timestamp_label = schema.Column(
types.DateTime(timezone=True), nullable=True)
original_message_type = types.LargeBinary().with_variant(MYSQL_LARGE_BINARY, 'mysql')
original_message = schema.Column(original_message_type, nullable=False)
content_block_count = schema.Column(types.Integer)
# FIXME: should be a proper reference ID
destination_collections = schema.Column(types.Text, nullable=True)
service_id = schema.Column(
types.String(150),
schema.ForeignKey(
'services.id', onupdate="CASCADE", ondelete="CASCADE"))
service = relationship('Service', backref='inbox_messages')
def __repr__(self):
return ('InboxMessage(id={obj.message_id}, created={obj.date_created})'
.format(obj=self))
class ResultSet(AbstractModel):
__tablename__ = 'result_sets'
id = schema.Column(types.String(150), primary_key=True)
collection_id = schema.Column(
types.Integer,
schema.ForeignKey(
'data_collections.id', onupdate='CASCADE', ondelete='CASCADE'))
collection = relationship('DataCollection', backref='result_sets')
bindings = schema.Column(types.Text)
begin_time = schema.Column(types.DateTime(timezone=True), nullable=True)
end_time = schema.Column(types.DateTime(timezone=True), nullable=True)
class Subscription(AbstractModel):
__tablename__ = 'subscriptions'
id = schema.Column(types.String(150), primary_key=True)
collection_id = schema.Column(
types.Integer,
schema.ForeignKey(
'data_collections.id', onupdate='CASCADE', ondelete='CASCADE'))
collection = relationship('DataCollection', backref='subscriptions')
params = schema.Column(types.Text, nullable=True)
# FIXME: proper enum type
status = schema.Column(types.String(150))
service_id = schema.Column(
types.String(150),
schema.ForeignKey(
'services.id', onupdate="CASCADE", ondelete="CASCADE"))
service = relationship('Service', backref='subscriptions')
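# Hedged sketch, not part of the original module: Service keeps its `_properties`
# column as JSON text, so the `properties` property round-trips plain dicts. The
# id and values below are made up for illustration.
def _example_service_properties():
    service = Service(id='inbox-1', type='inbox')
    service.properties = {'address': '/services/inbox-1', 'protocol': 'http'}
    assert service.properties['protocol'] == 'http'
    return service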
|
|
import os
import argparse
from flask import current_app
try:
from flask_script import Manager
except ImportError:
Manager = None
from alembic import __version__ as __alembic_version__
from alembic.config import Config as AlembicConfig
from alembic import command
alembic_version = tuple([int(v) for v in __alembic_version__.split('.')[0:3]])
class _MigrateConfig(object):
def __init__(self, migrate, db, **kwargs):
self.migrate = migrate
self.db = db
self.directory = migrate.directory
self.configure_args = kwargs
@property
def metadata(self):
"""
Backwards compatibility, in old releases app.extensions['migrate']
was set to db, and env.py accessed app.extensions['migrate'].metadata
"""
return self.db.metadata
class Config(AlembicConfig):
def get_template_directory(self):
package_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(package_dir, 'templates')
class Migrate(object):
def __init__(self, app=None, db=None, directory='migrations', **kwargs):
self.configure_callbacks = []
self.db = db
self.directory = directory
self.alembic_ctx_kwargs = kwargs
if app is not None and db is not None:
self.init_app(app, db, directory)
def init_app(self, app, db=None, directory=None, **kwargs):
self.db = db or self.db
self.directory = directory or self.directory
self.alembic_ctx_kwargs.update(kwargs)
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['migrate'] = _MigrateConfig(self, self.db,
**self.alembic_ctx_kwargs)
def configure(self, f):
self.configure_callbacks.append(f)
return f
def call_configure_callbacks(self, config):
for f in self.configure_callbacks:
config = f(config)
return config
def get_config(self, directory=None, x_arg=None, opts=None):
if directory is None:
directory = self.directory
config = Config(os.path.join(directory, 'alembic.ini'))
config.set_main_option('script_location', directory)
if config.cmd_opts is None:
config.cmd_opts = argparse.Namespace()
for opt in opts or []:
setattr(config.cmd_opts, opt, True)
if not hasattr(config.cmd_opts, 'x'):
if x_arg is not None:
setattr(config.cmd_opts, 'x', [])
if isinstance(x_arg, list) or isinstance(x_arg, tuple):
for x in x_arg:
config.cmd_opts.x.append(x)
else:
config.cmd_opts.x.append(x_arg)
else:
setattr(config.cmd_opts, 'x', None)
return self.call_configure_callbacks(config)
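# Hedged sketch, not part of the original module: Migrate.configure registers a
# hook that receives the Alembic Config before each command runs. The
# `migrate_instance` and `database_url` arguments are assumed to come from the caller.
def _example_configure_callback(migrate_instance, database_url):
    @migrate_instance.configure
    def _set_url(config):
        config.set_main_option('sqlalchemy.url', database_url)
        return config
    return migrate_instance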
if Manager is not None:
MigrateCommand = Manager(usage='Perform database migrations')
else:
class FakeCommand(object):
def option(self, *args, **kwargs):
def decorator(f):
return f
return decorator
MigrateCommand = FakeCommand()
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('--multidb', dest='multidb', action='store_true',
default=False,
help=("Multiple databases migraton (default is "
"False)"))
def init(directory=None, multidb=False):
"""Creates a new migration repository"""
if directory is None:
directory = current_app.extensions['migrate'].directory
config = Config()
config.set_main_option('script_location', directory)
config.config_file_name = os.path.join(directory, 'alembic.ini')
config = current_app.extensions['migrate'].\
migrate.call_configure_callbacks(config)
if multidb:
command.init(config, directory, 'flask-multidb')
else:
command.init(config, directory, 'flask')
@MigrateCommand.option('--rev-id', dest='rev_id', default=None,
help=('Specify a hardcoded revision id instead of '
'generating one'))
@MigrateCommand.option('--version-path', dest='version_path', default=None,
help=('Specify specific path from config for version '
'file'))
@MigrateCommand.option('--branch-label', dest='branch_label', default=None,
help=('Specify a branch label to apply to the new '
'revision'))
@MigrateCommand.option('--splice', dest='splice', action='store_true',
default=False,
help=('Allow a non-head revision as the "head" to '
'splice onto'))
@MigrateCommand.option('--head', dest='head', default='head',
help=('Specify head revision or <branchname>@head to '
'base new revision on'))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('--autogenerate', dest='autogenerate',
action='store_true', default=False,
help=('Populate revision script with candidate '
'migration operations, based on comparison of '
'database to model'))
@MigrateCommand.option('-m', '--message', dest='message', default=None,
help='Revision message')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def revision(directory=None, message=None, autogenerate=False, sql=False,
head='head', splice=False, branch_label=None, version_path=None,
rev_id=None):
"""Create a new revision file."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 7, 0):
command.revision(config, message, autogenerate=autogenerate, sql=sql,
head=head, splice=splice, branch_label=branch_label,
version_path=version_path, rev_id=rev_id)
else:
command.revision(config, message, autogenerate=autogenerate, sql=sql)
@MigrateCommand.option('--rev-id', dest='rev_id', default=None,
help=('Specify a hardcoded revision id instead of '
'generating one'))
@MigrateCommand.option('--version-path', dest='version_path', default=None,
help=('Specify specific path from config for version '
'file'))
@MigrateCommand.option('--branch-label', dest='branch_label', default=None,
help=('Specify a branch label to apply to the new '
'revision'))
@MigrateCommand.option('--splice', dest='splice', action='store_true',
default=False,
help=('Allow a non-head revision as the "head" to '
'splice onto'))
@MigrateCommand.option('--head', dest='head', default='head',
help=('Specify head revision or <branchname>@head to '
'base new revision on'))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('-m', '--message', dest='message', default=None)
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('-x', '--x-arg', dest='x_arg', default=None,
action='append', help=("Additional arguments consumed "
"by custom env.py scripts"))
def migrate(directory=None, message=None, sql=False, head='head', splice=False,
branch_label=None, version_path=None, rev_id=None, x_arg=None):
"""Alias for 'revision --autogenerate'"""
config = current_app.extensions['migrate'].migrate.get_config(
directory, opts=['autogenerate'], x_arg=x_arg)
if alembic_version >= (0, 7, 0):
command.revision(config, message, autogenerate=True, sql=sql,
head=head, splice=splice, branch_label=branch_label,
version_path=version_path, rev_id=rev_id)
else:
command.revision(config, message, autogenerate=True, sql=sql)
@MigrateCommand.option('revision', nargs='?', default='head',
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def edit(directory=None, revision='current'):
"""Edit current revision."""
if alembic_version >= (0, 8, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.edit(config, revision)
else:
raise RuntimeError('Alembic 0.8.0 or greater is required')
@MigrateCommand.option('--rev-id', dest='rev_id', default=None,
help=('Specify a hardcoded revision id instead of '
'generating one'))
@MigrateCommand.option('--branch-label', dest='branch_label', default=None,
help=('Specify a branch label to apply to the new '
'revision'))
@MigrateCommand.option('-m', '--message', dest='message', default=None)
@MigrateCommand.option('revisions', nargs='+',
help='one or more revisions, or "heads" for all heads')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def merge(directory=None, revisions='', message=None, branch_label=None,
rev_id=None):
"""Merge two revisions together. Creates a new migration file"""
if alembic_version >= (0, 7, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.merge(config, revisions, message=message,
branch_label=branch_label, rev_id=rev_id)
else:
raise RuntimeError('Alembic 0.7.0 or greater is required')
@MigrateCommand.option('--tag', dest='tag', default=None,
help=("Arbitrary 'tag' name - can be used by custom "
"env.py scripts"))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('revision', nargs='?', default='head',
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('-x', '--x-arg', dest='x_arg', default=None,
action='append', help=("Additional arguments consumed "
"by custom env.py scripts"))
def upgrade(directory=None, revision='head', sql=False, tag=None, x_arg=None):
"""Upgrade to a later version"""
config = current_app.extensions['migrate'].migrate.get_config(directory,
x_arg=x_arg)
command.upgrade(config, revision, sql=sql, tag=tag)
@MigrateCommand.option('--tag', dest='tag', default=None,
help=("Arbitrary 'tag' name - can be used by custom "
"env.py scripts"))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('revision', nargs='?', default="-1",
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('-x', '--x-arg', dest='x_arg', default=None,
action='append', help=("Additional arguments consumed "
"by custom env.py scripts"))
def downgrade(directory=None, revision='-1', sql=False, tag=None, x_arg=None):
"""Revert to a previous version"""
config = current_app.extensions['migrate'].migrate.get_config(directory,
x_arg=x_arg)
if sql and revision == '-1':
revision = 'head:-1'
command.downgrade(config, revision, sql=sql, tag=tag)
@MigrateCommand.option('revision', nargs='?', default="head",
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def show(directory=None, revision='head'):
"""Show the revision denoted by the given symbol."""
if alembic_version >= (0, 7, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.show(config, revision)
else:
raise RuntimeError('Alembic 0.7.0 or greater is required')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-r', '--rev-range', dest='rev_range', default=None,
help='Specify a revision range; format is [start]:[end]')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def history(directory=None, rev_range=None, verbose=False):
"""List changeset scripts in chronological order."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 7, 0):
command.history(config, rev_range, verbose=verbose)
else:
command.history(config, rev_range)
@MigrateCommand.option('--resolve-dependencies', dest='resolve_dependencies',
action='store_true', default=False,
help='Treat dependency versions as down revisions')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def heads(directory=None, verbose=False, resolve_dependencies=False):
"""Show current available heads in the script directory"""
if alembic_version >= (0, 7, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.heads(config, verbose=verbose,
resolve_dependencies=resolve_dependencies)
else:
raise RuntimeError('Alembic 0.7.0 or greater is required')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def branches(directory=None, verbose=False):
"""Show current branch points"""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 7, 0):
command.branches(config, verbose=verbose)
else:
command.branches(config)
@MigrateCommand.option('--head-only', dest='head_only', action='store_true',
default=False,
help='Deprecated. Use --verbose for additional output')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def current(directory=None, verbose=False, head_only=False):
"""Display the current revision for each database."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 7, 0):
command.current(config, verbose=verbose, head_only=head_only)
else:
command.current(config)
@MigrateCommand.option('--tag', dest='tag', default=None,
help=("Arbitrary 'tag' name - can be used by custom "
"env.py scripts"))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('revision', default=None, help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def stamp(directory=None, revision='head', sql=False, tag=None):
"""'stamp' the revision table with the given revision; don't run any
migrations"""
config = current_app.extensions['migrate'].migrate.get_config(directory)
command.stamp(config, revision, sql=sql, tag=tag)
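# Minimal usage sketch (not part of this module; the `app`, `db` and `manage.py`
# names below are illustrative assumptions): the commands defined above become
# available through a Flask-Script Manager once MigrateCommand is registered.
#
#     from flask_script import Manager
#     from flask_migrate import Migrate, MigrateCommand
#
#     manager = Manager(app)
#     Migrate(app, db)
#     manager.add_command('db', MigrateCommand)
#     # then e.g.:  python manage.py db upgrade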
|
|
import numpy as n, matplotlib.pyplot as p, scipy.special
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
from scipy.integrate import quad, tplquad
import itertools
from scipy.interpolate import interp1d
from scipy.interpolate import RectBivariateSpline as RBS
Om,sig8,ns,h,Ob = 0.315, 0.829, 0.96, 0.673, 0.0487
cosmo = {'baryonic_effects':True,'omega_k_0':0,'omega_M_0':0.315, 'omega_b_0':0.0487, 'n':0.96, 'N_nu':0, 'omega_lambda_0':0.685,'omega_n_0':0., 'sigma_8':0.829,'h':0.673}
def m2R(m):
    rhobar = cd.cosmo_densities(**cosmo)[1] # mean matter density, Msun/Mpc^3
RL = (3*m/4/n.pi/rhobar)**(1./3)
return RL
def m2V(m):
    rhobar = cd.cosmo_densities(**cosmo)[1] # mean matter density, Msun/Mpc^3
return m/rhobar
def R2m(RL):
    rhobar = cd.cosmo_densities(**cosmo)[1] # mean matter density, Msun/Mpc^3
m = 4*n.pi/3*rhobar*RL**3
return m
def mmin(z,Tvir=1.E4):
return pb.virial_mass(Tvir,z,**cosmo)
def RG(RL): return 0.46*RL
def W(y): return 3/y**3*(n.sin(y)-y*n.cos(y))
def WG(y): return n.exp(-y**2/2)
def Del2k(k):
Pk = pb.power_spectrum(k,0.,**cosmo)
Del2k = k**3*Pk/2/n.pi**2
#fgrowth = pb.fgrowth(z, cosmo['omega_M_0'])
#Del2k0 = Del2k/fgrowth**2#*pb.norm_power(**cosmo)
return Del2k
#def sig0(RL,Del2k):
# return n.sum(Del2k**2*W(RL*k)**2)*(logk[1]-logk[0])
#def sig0(RL,Del2k):
# return n.sum(Del2k**2*W(RL*k)**2/k)*(k[1]-k[0])
def polyval2d(x, y, m):
order = int(n.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
z = n.zeros_like(x)
for a, (i,j) in zip(m, ij):
z += a * x**i * y**j
return z
def sig0test(RL,kmax):
return quad(lambda k: Del2k(k)*W(RL*k)**2/k, 0, kmax)[0] #z=0 extrapolated to present
def sig0(RL):
return (pb.sigma_r(RL,0.,**cosmo)[0])**2
def sigG(RL,j):
return (pb.sigma_j(RL,j,0.,**cosmo)[0])**2
dsig1m = n.load('sig1m.npz')
sig1mRl,sig1marr = dsig1m['arr_0'],dsig1m['arr_1']
fs1m = interp1d(sig1mRl,sig1marr,kind='cubic')
def sig1m(RL):
return fs1m(RL)
#def sig1m(RL,kmax=20.):
#coeff = n.array([-35197.22457096, 44816.6140037 , -22450.21477783, 5671.79478317,
# -790.99091133, 74.00855598])
#n.array([ 1.81095565, -6.51689501, 0.03932317, 12.22205831])
#return n.poly1d(coeff)(RL)
#return quad(lambda k: Del2k(k)*W(RL*k)*WG(RG(RL)*k)/k, 0, kmax)[0]
#return n.sum(Del2k*k**2*n.exp(-k**2*RG(RL)**2/2)*W(RL*k))*(logk[1]-logk[0])
#def SX(RL,R0,kmax=20.):
#coeff = n.array([22.25,-6.645,0.54936,0.0128,18.66,6.029,-0.4879,0.01109,4.8616,-1.4594,0.1096,-0.00235,-0.384,0.107,-0.00741,0.0])
#return polyval2d(RL,R0,coeff)
#return quad(lambda k: Del2k(k)*W(RL*k)*W(R0*k)/k, 0, kmax)[0]
#def sig1mX(RL,R0,kmax=20.):
#logr,logR0 = n.log(RL),n.log(R0)
#coeff = n.array([ 7.08046191, 28.16149525, -23.50798007, 4.20273492,
# -34.31345153, 101.96878325, -78.59663353, 16.35608005,
# -35.10071616, 1.19563953, 18.76803373, -5.08233304,
# -7.29945622, -5.95674768, 9.93434604, -2.36906904])
#return polyval2d(logr,logR0,coeff)
#return quad(lambda k: Del2k(k)*(k**2)*WG(RG(RL)*k)*W(R0*k)/k, 0, kmax)[0]
#def SX(RL,R0,kf=20.):
# kmax = kf/R0
# return quad(lambda k: Del2k(k)*W(RL*k)*W(R0*k)/k, 0, kmax)[0]
#def sig1mX(RL,R0,kf=20.):
# kmax = kf/R0
# return quad(lambda k: Del2k(k)*(k**2)*WG(RG(RL)*k)*W(R0*k)/k, 0, kmax)[0]
dSX = n.load('logSX.npz')
lSXRl,lSXR0,arrSX = dSX['arr_0'],dSX['arr_1'],dSX['arr_2']
fSX = RBS(lSXRl,lSXR0,arrSX)
def SX(RL,R0):
res = fSX(n.log(RL),n.log(R0))
if res.size > 1: print 'Warning: SX called with array instead of single number'
return res[0][0]
ds1mX = n.load('logsig1mX.npz')
ls1mXRl,ls1mXR0,arrs1mX = ds1mX['arr_0'],ds1mX['arr_1'],ds1mX['arr_2']
fs1mX = RBS(ls1mXRl,ls1mXR0,arrs1mX)
def sig1mX(RL,R0):
res = fs1mX(n.log(RL),n.log(R0))
if res.size > 1: print 'Warning: s1mX called with array instead of single number'
return res[0][0]
#def SX(RL,R0,kf=10.):
# logmax = n.log(kf/R0)
# return quad(lambda logk: Del2k(n.exp(logk))*W(RL*n.exp(logk))*W(R0*n.exp(logk)), 0, logmax)[0]
#def sig1mX(RL,R0,kf=10.):
# logmax = n.log(kf/R0)
# return quad(lambda logk: Del2k(n.exp(logk))*(n.exp(logk)**2)*WG(RG(RL)*n.exp(logk))*W(R0*n.exp(logk)), 0, logmax)[0]
def gam(RL):
return sig1m(RL)/n.sqrt(sig0(RL)*sigG(RL,2))
def Vstar(RL):
return (6*n.pi)**1.5*(sigG(RL,1)/sigG(RL,2))**3
def erf(x):
return scipy.special.erf(x)
def prob(x,av=0.5,var=0.25):
return 1/n.sqrt(2*n.pi*var)/x*n.exp(-(n.log(x)-av)**2/2/var)
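# prob(x) above is the lognormal pdf in x (i.e. ln(x) ~ Normal(av, var)); it is
# used below as the weight over the stochastic barrier parameter b in
# integrand() and All().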
def F(x):
return (x**3-3*x)/2*(erf(x*n.sqrt(5./2))+erf(x*n.sqrt(5./8)))+n.sqrt(2./5/n.pi)*((31*x**2/4+8./5)*n.exp(-5.*x**2/8)+(x**2/2-8./5)*n.exp(-5.*x**2/2))
def Deltac(z):
fgrowth = pb.fgrowth(z, cosmo['omega_M_0']) # = D(z)/D(0)
return 1.686/fgrowth
def pG(y,av,var):
return 1/n.sqrt(2*n.pi*var)*n.exp(-(y-av)**2/2/var)
def B(z,beta,s):
return Deltac(z)+beta*n.sqrt(s)
def Q(m,M0):
r,R0 = m2R(m), m2R(M0)
s,s0 = sig0(r), sig0(R0)
sx = SX(r,R0)
return 1-sx**2/s/s0
def epX(m,M0):
r,R0 = m2R(m), m2R(M0)
s,s0 = sig0(r), sig0(R0)
sx = SX(r,R0)
sg1m = sig1m(r)
sg1mX = sig1mX(r,R0)
return s*sg1m/sx/sg1mX
def subgrand(b,del0,m,M0,z):
V,r,dmdr = pb.volume_radius_dmdr(m,**cosmo)
R0 = m2R(M0)
s,s0,sx = sig0(r), sig0(R0),SX(r,R0)
Bb = B(z,b,s)
gamm = gam(r)
epx,q = epX(m,M0), Q(m,M0)
print 'gamm,epx,q =',gamm,epx,q
meanmu = del0/n.sqrt(s)*sx/s0
varmu = Q(m,M0)
meanx = gamm*((Bb-del0*sx/s0)*(1-epx)/q/n.sqrt(s)+Bb*epx/n.sqrt(s))
varx = 1-gamm**2-gamm**2*(1-epx)**2*(1-q)/q
fact = V/Vstar(R0)*pG(Bb/n.sqrt(s),meanmu, varmu)
print b, Bb/n.sqrt(s),meanmu,varmu,pG(Bb/n.sqrt(s),meanmu, varmu)
factint = quad(lambda x: (x/gamm-b)*F(x)*pG(x,meanx,varx),b*gamm,100)[0]
#print fact, factint
return fact*factint
def integrand(del0,m,M0,z): #2s*f_ESP
s = sig0(m2R(m))
print '#################'
return quad(lambda b: prob(b)*subgrand(b,del0,m,M0,z),0,4.)[0]/2/s
def dsdm(m):
return (sig0(m2R(m+1))-sig0(m2R(m-1)))/2
def fcoll(del0,M0,z):
return quad(lambda m: integrand(del0,m,M0,z)*dsdm(m),mmin(z),M0)
def All(x,b,m,del0,M0,z): #z,y,x,c,c,c
V,r,dmdr = pb.volume_radius_dmdr(m,**cosmo)
R0 = m2R(M0)
s,s0,sx = sig0(r), sig0(R0),SX(r,R0)
Bb = B(z,b,s)
gamm = gam(r)
epx,q = epX(m,M0), Q(m,M0)
#print 'gamm,epx,q =',gamm,epx,q
meanmu = del0/n.sqrt(s)*sx/s0
varmu = Q(m,M0)
meanx = gamm*((Bb-del0*sx/s0)*(1-epx)/q/n.sqrt(s)+Bb*epx/n.sqrt(s))
varx = 1-gamm**2-gamm**2*(1-epx)**2*(1-q)/q
fact = V/Vstar(R0)*pG(Bb/n.sqrt(s),meanmu, varmu)
#print b, Bb/n.sqrt(s),meanmu,varmu,pG(Bb/n.sqrt(s),meanmu, varmu)
return fact*prob(b)*(x/gamm-b)*F(x)*pG(x,meanx,varx)/2/sig0(m2R(m))*dsdm(m)
p.figure()
Z = [12.]
###################### PARAMETERS ############################
#z = 12.
for z in Z:
deltac = Deltac(z)
#deltac = 1.686*(1+z) #z_eq =3233?
##print deltac
#Del2k0 = Del2k/fgrowth**2 #linearly extrapolated to present epoch
####################################
#sig_8 = n.sqrt(sig0(8./cosmo['h'],Del2k0))
#print sig_8
sig_8 = n.sqrt(sig0(8./cosmo['h']))
print 'sig_8',sig_8
#Del2k0 = Del2k0*(sig8/sig_8)
####################################
zeta = 40.
K = scipy.special.erfinv(1-1./zeta)
print 'K(zeta)=',K
#import IPython; IPython.embed()
####################### FZH04 ##############################
##### m_min
Tvir = 1.E4
#mmin = (Tvir/442/Om**(1./3)/((1+z)/100))**(3./2)*(h**(-1)*1.E4)
mmin = pb.virial_mass(Tvir,z,**cosmo)
print "minimum mass (msuns)", mmin
RLmin = m2R(mmin)
print 'R',RLmin
#rlmin = pb.mass_to_radius(mmin,**cosmo)
#print RLmin, rlmin #==
#smin = sig0(RLmin,Del2k0)
smin = sig0(RLmin)
print 'smin=',smin
#######
S0max = sig0(m2R(zeta*mmin))
S0 = n.arange(0,S0max,0.2)
BFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZH0 = deltac-K*n.sqrt(2*smin)
bFZH1 = K/n.sqrt(2*smin)
BFZHlin = bFZH0+bFZH1*S0
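    # The two barriers above are the FZH04 barrier (presumably Furlanetto,
    # Zaldarriaga & Hernquist 2004) and its linearisation about S0 = 0:
    #   B(S0, z) = delta_c(z) - K(zeta)*sqrt(2*(S_min - S0)),  K(zeta) = erfinv(1 - 1/zeta)
    #   B(S0, z) ~ bFZH0 + bFZH1*S0,  bFZH0 = delta_c - K*sqrt(2*S_min),  bFZH1 = K/sqrt(2*S_min)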
p.plot(S0,BFZH,'b')
p.plot(S0,BFZHlin,'b.-')
M0 = zeta*mmin*2
del0 = 5.
#print quad(lambda m: integrand(del0,m,M0,12.)*dsdm(m),mmin,M0)
tplquad(All,mmin,M0,lambda x: 0, lambda x: 5., lambda x,y: gam(m2R(x))*y,lambda x,y: 10.,args=(del0,M0,z))
p.show()
|
|
import sys as __sys
import requests as __requests
# from Bio import Seq as Seq
from Bio import SeqIO as __SeqIO
from Bio import SeqRecord as __SeqRecord
import StringIO as __StringIO
import re as __re
import pandas as __pd
import numpy as __np
__web_request_status_collection = {200: "The request was processed successfully.",
400: "Bad request. There is a problem with your input.",
404: "Not found. The resource you requested doesnt exist.",
410: "Gone. The resource you requested was removed.",
500: "Internal server error. Most likely a temporary problem, but if the problem persists please contact us.",
503: "Service not available. The server is being updated, try again later."}
# # typical Uniprot ID (for isoform 2) ...
# uid = "P16870-2"
def get_uniprot(session,uid,seq_format='fasta'):
""" see http://www.uniprot.org/help/programmatic_access for details """
    # treat missing or unknown data fairly ...
if uid is None:
return None
# the way we form Uniprot ID request ...
get_uid_url = lambda _: "http://www.uniprot.org/uniprot/%s.fasta"%_
# web request for a given uid ...
uid_url = get_uid_url(uid)
# make a request ...
req_res = session.get(uid_url)
# check request status ...
if req_res.status_code==200 and bool(req_res.content):
# to be read by __SeqIO ...
string_as_handle = __StringIO.StringIO(req_res.content)
seq_rec = __SeqIO.read(string_as_handle,seq_format)
return seq_rec
elif req_res.status_code==200:
print __web_request_status_collection[req_res.status_code]
print "... But, the content is empty for accession number %s!"%uid
__sys.exit(1)
elif req_res.status_code in __web_request_status_collection:
print __web_request_status_collection[req_res.status_code]
__sys.exit(1)
else:
print "Unknown status code returned!"
__sys.exit(1)
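# Usage sketch (commented out because it performs a network request; the
# accession is the example uid from the comment above):
#     session = __requests.Session()
#     seq_rec = get_uniprot(session, "P16870-2")   # -> Bio SeqRecord parsed from the FASTA reply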
def stupid_aligner(peptide,protein):
""" 1-based number of peptide occurance in the protein ..."""
# small subfunction to get the hamming distance ...
def hamming_distance(seq1,seq2):
assert len(seq1)==len(seq2)
mismatches = sum( (l1!=l2) for l1,l2 in zip(seq1,seq2) )
return mismatches
# treat missing data fairly ...
if protein is None:
return None
    # sequences to strings, whether they come in as SeqRecord/Seq entities or plain strings ...
peptide_str = str(peptide.seq) if type(peptide)==__SeqRecord.SeqRecord else str(peptide)
protein_str = str(protein.seq) if type(protein)==__SeqRecord.SeqRecord else str(protein)
# lengths ...
pept_len = len(peptide_str)
prot_len = len(protein_str)
# stupid alignment ...
min_mismatch = prot_len
align_frame = 0
for f in range(prot_len - pept_len + 1):
prot_substring = protein_str[f:f+pept_len]
delta_hd = hamming_distance(peptide_str,prot_substring)
# in case perfect alignment is found ...
if delta_hd == 0:
align_frame = f
return align_frame
# or keep searching minimum mismatch alignment ...
if delta_hd < min_mismatch:
align_frame, min_mismatch = f, delta_hd
# make a verbose report after the whole protein was scanned ...
print "Beware! Best alignment found has %d mismatches for peptide %s"%(min_mismatch,peptide_str)
# Here we're enforcing the 1-based indexing ...
return align_frame + 1
# modifications can come in various forms ...
# n4: Deamidated:18O(1) (+2.99)
# Deamidated:18O(1) (+3)
def parse_spectrum_modifications(modifier):
# print modifier
loc_mod = modifier.strip()
loc_mod = loc_mod.split(' ')
if len(loc_mod)==3:
mod_type = loc_mod[0].strip(':')
# aa modified and peptide position ...
# POSITIONS ARE MOST LIKELY TO HAVE 1-BASED INDEX ...
mod_type_aa, mod_type_pos = mod_type[0], int(mod_type[1:])
# value ...
mod_val = float(loc_mod[2].strip('()'))
#
return (mod_type_aa, mod_type_pos, mod_val)
elif len(loc_mod)==2:
mod_val = float(loc_mod[1].strip('()'))
#
return (None, None, mod_val)
else:
print "Unknown format of modification description: %s"%modifier
        __sys.exit(1)
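# Worked examples for the two modifier formats listed above:
#     parse_spectrum_modifications("n4: Deamidated:18O(1) (+2.99)")  ->  ('n', 4, 2.99)
#     parse_spectrum_modifications("Deamidated:18O(1) (+3)")         ->  (None, None, 3.0)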
# protein name example:
# sp|P04439|1A03_HUMAN HLA class I histocompatibility antigen, A-3 alpha chain OS=Homo sapiens GN=HLA-A PE=1 SV=2
# regexp to use often:
# first part of full protein name with Uid and Locus ...
__left_part = __re.compile("[a-z]{2}\|[A-Z0-9\-\_\.\s]+\|[A-Z0-9\_]+")
# features like OS,GN etc, simply to extract what features are present ...
__feature_types = __re.compile("[A-Z]{2}=")
def parse_prot_name(prot_name,verbose=True):
"""Functions parses provided protein name and returns a dict with: uid,locus,prot_name,GeneName,OrgSource
-------------------------
prot name expected format:
'sp|P04439|1A03_HUMAN HLA class I histocompatibility antigen, A-3 alpha chain OS=Homo sapiens GN=HLA-A PE=1 SV=2'"""
#
dict_to_return = {}
#
# GO THROUGH FEATURES, like OS=Homo sapiens GN=HLA-A PE=1 SV=2 ...
# what features are present ...
f_types = [ f.strip('=') for f in __feature_types.findall(prot_name) ]
if f_types:
# generate regexp based on the combination of features:
# for example: "GN=(.+)PE=(.+)SV=(.+)"
f_pattern = ''.join(['%s=(.+)'%f for f in f_types])
# find the whole pattern in the protein name:
f_values, = __re.findall(f_pattern,prot_name)
# right features part for stripping ...
rp_extracted = ''.join( "%s=%s"%(k,v) for k,v in zip(f_types,f_values) )
else:
rp_extracted = ''
# store everything in an f_dict ...
f_dict = dict( zip(f_types,f_values) ) if f_types else {}
#
#
# extract left most part (if it's extractable)...:
lp_extracted = __left_part.findall(prot_name)
if len(lp_extracted)!=1:
if verbose:
print "Could not match left part of protein name: %s"%prot_name
print "Extraction result is: ", lp_extracted
lp_extracted = ""
else:
lp_extracted, = lp_extracted
_,uid,locus = lp_extracted.split('|')
dict_to_return['uid'] = uid.strip()
dict_to_return['locus'] = locus.strip()
#
    # strip left and right parts of the full prot name to get the human-readable description ...
prot_name_extracted = prot_name.replace(lp_extracted,'').replace(rp_extracted,'').strip()
dict_to_return['prot_name'] = prot_name_extracted.strip()
#
# returning all extracted information ...
if ('GN' not in f_types)or('OS' not in f_types) :
if verbose:
print "There is no GeneName or OrganismSource in the protein name: %s"%prot_name
print "Feature types extracted are: ",f_types
else:
dict_to_return['GN'] = f_dict['GN'].strip()
dict_to_return['OS'] = f_dict['OS'].strip()
# returning the result regardless of the internals ...
return dict_to_return
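# Example for the format quoted in the docstring above (values traced by hand, shown approximately):
#     parse_prot_name('sp|P04439|1A03_HUMAN HLA class I histocompatibility antigen, A-3 alpha chain OS=Homo sapiens GN=HLA-A PE=1 SV=2')
#     -> {'uid': 'P04439', 'locus': '1A03_HUMAN',
#         'prot_name': 'HLA class I histocompatibility antigen, A-3 alpha chain',
#         'GN': 'HLA-A', 'OS': 'Homo sapiens'}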
########################################################################################################################
# READING GENEBANK CAREFULLY ...
#########################################################
import warnings as __warnings
from Bio import BiopythonWarning as __BiopythonWarning
from Bio import BiopythonParserWarning as __BiopythonParserWarning
def __fix_str(input_str,known_errors,known_fixes):
"""function that would be replacing known error strings in the input by the known fix replacement"""
fixed_input = str(input_str)
for err,fix in zip(known_errors,known_fixes):
fixed_input = fixed_input.replace(err,fix)
return fixed_input
########################################################################################################################
def __fix_genbank(error_msg,genbank_fname):
error_pattern = "(\d+\^\d+)"
known_errors = __re.findall(error_pattern,str(error_msg))
known_fixes = [err.replace('^','..') for err in known_errors]
#
if known_errors:
with open(genbank_fname,'r') as fp:
file_string_io = [ __fix_str(line,known_errors,known_fixes) for line in fp ]
file_string_io = __StringIO.StringIO(''.join(file_string_io))
print "Error if fixed locally, proceeding ..."
return file_string_io
else:
print "Error in the genbank could not be resolved. Termination"
__sys.exit(1)
########################################################################################################################
# READING SEQUENCES FROM THE FILE ...
def genebank_fix_n_read(gb_fname,key_func_type='gi'):
"""locations formatted as '1^593' cause BioPython error while reading genbanks ...
We are addressing that by fixing genbank source on the fly ..."""
print "Reading %s with genebank records from the NCBI fetch ..."%gb_fname
# choose the key-function based on the 'key_func_type' argument:
if key_func_type == 'gi':
key_function=lambda rec: rec.annotations['gi']
if key_func_type == 'id':
key_function=lambda rec: rec.id
print "Using %s as a key."%key_func_type
#
with __warnings.catch_warnings():
# e.g. BiopythonParserWarning: Dropping bond qualifier in feature location
#
__warnings.simplefilter("ignore", __BiopythonParserWarning)
#
gb_recs_iter = __SeqIO.parse(gb_fname,'gb')
try:
gbrecs = __SeqIO.to_dict( gb_recs_iter, key_function=key_function )
except ValueError, er_msg:
print "Catched ValueError: %s"%str(er_msg)
# #
# Invalid between location '1^593'
# Try to fix that thing, by replacing '^' with '..'
file_string_io = __fix_genbank(er_msg,gb_fname)
gb_recs_iter = __SeqIO.parse(file_string_io,'gb')
gbrecs = __SeqIO.to_dict( gb_recs_iter, key_function=key_function )
    return gbrecs
########################################################################################################################
# READING GENEBANK CAREFULLY ...
#########################################################
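# Usage sketch (hypothetical file name):
#     gbrecs = genebank_fix_n_read("ncbi_fetch.gb", key_func_type='gi')
#     # -> dict of SeqRecord objects keyed by GI (or by record id with key_func_type='id')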
###########################################################################
# STAGE 2 FUNCTIONS AND METHODS ...
###########################################################################
gbrecs = None
########################################################################################################################
def get_enzyme(sample_cat):
if __pd.notnull(sample_cat):
if 'Try' in sample_cat:
return 'T'
elif 'Glu' in sample_cat:
return 'G'
else:
return None
else:
return None
######################################################################################################
__yn_map = {True:'Y',False:'N'}
######################################################################################################
def get_tm_span(fidx):
if __pd.notnull(fidx):
try:
fidx_str = str(int(fidx))
except:
fidx_str = str(fidx)
feats_descr = []
for feat in gbrecs[fidx_str].features:
quals = feat.qualifiers
feats_descr.append( __yn_map["Transmembrane" in ''.join(quals['region_name'])] if ('region_name' in quals) else None )
return 'Y' if ('Y' in feats_descr) else 'N'
else:
return None
######################################################################################################
def get_genename(fidx):
if __pd.notnull(fidx):
try:
fidx_str = str(int(fidx))
except:
fidx_str = str(fidx)
fid_features = gbrecs[fidx_str].features
if 'gene' in fid_features[1].qualifiers:
return fid_features[1].qualifiers['gene'][0]
else:
for feat in fid_features:
if 'gene' in feat.qualifiers:
return feat.qualifiers['gene'][0]
# if GN wasn't found in any of the features, return None ...
return None
else:
return None
######################################################################################################
def get_signal(fidx):
if __pd.notnull(fidx):
try:
fidx_str = str(int(fidx))
except:
fidx_str = str(fidx)
feats_descr = []
for feat in gbrecs[fidx_str].features:
quals = feat.qualifiers
feats_descr.append( __yn_map['Signal' in quals['region_name']] if ('region_name' in quals) else None )
return 'Y' if ('Y' in feats_descr) else 'N'
else:
return None
######################################################################################################
# to be edited to turn into a generic feature locator ...
def get_signal_loc(fidx):
if __pd.notnull(fidx):
try:
fidx_str = str(int(fidx))
except:
fidx_str = str(fidx)
for feat in gbrecs[fidx_str].features:
quals = feat.qualifiers
if ('region_name' in quals):
if 'Signal' in quals['region_name']:
# start,end = (feat.location.start.position+1, feat.location.end.position)
return "%d..%d"%(feat.location.start.position+1, feat.location.end.position)
return None
else:
return None
######################################################################################################
# NEW TO BE TESTED ...
def get_topo(fidx):
def extract_topo_info(quals):
if 'note' in quals:
# Cytoplasmic or Extracellular
if "Cytoplasmic" in ' '.join(quals['note']):
return 'Cytoplasmic'
elif "Extracellular" in ' '.join(quals['note']):
return 'Extracellular'
elif "Lumenal" in ' '.join(quals['note']):
return 'Lumenal'
elif "Mitochondrial" in ' '.join(quals['note']):
return 'Mitochondrial'
elif "Nuclear" in ' '.join(quals['note']):
return 'Nuclear'
elif "Perinuclear" in ' '.join(quals['note']):
return 'Perinuclear'
elif "Vacuolar" in ' '.join(quals['note']):
return 'Vacuolar'
else:
print "Unidentified localization of topo domain %s"%str(quals)
return None
else:
print "topo domain has no note, quals: %s"%str(quals)
return None
# seems to be working ok ...
if __pd.notnull(fidx):
try:
fidx_str = str(int(fidx))
except:
fidx_str = str(fidx)
# at first, simply survey all Regions, and record all topo-domains ...
topo_domains = {}
for feat_id,feat in enumerate(gbrecs[fidx_str].features):
quals = feat.qualifiers
if 'region_name' in quals:
if "Topological domain" in ''.join(quals['region_name']):
start = feat.location.start.position+1
end = feat.location.end.position
topo_domains[(start,end)] = feat_id
#################################################################
if not topo_domains:
print "No Topological domains detcted for %s"%fidx_str
return __pd.Series({'N-term':None,'C-term':None})
else:
topo_domains_description = {}
N_key = min(topo_domains)
C_key = max(topo_domains)
N_domain_info = gbrecs[fidx_str].features[ topo_domains[N_key] ].qualifiers
C_domain_info = gbrecs[fidx_str].features[ topo_domains[C_key] ].qualifiers
topo_domains_description['N-term'] = extract_topo_info(N_domain_info)
topo_domains_description['C-term'] = extract_topo_info(C_domain_info)
#####################################################
return __pd.Series(topo_domains_description)
else:
return None
######################################################################################################
def get_all_tms(fidx):
# seems to be working fine ...
if __pd.notnull(fidx):
try:
fidx_str = str(int(fidx))
except:
fidx_str = str(fidx)
tm_locs = []
for feat in gbrecs[fidx_str].features:
quals = feat.qualifiers
if 'region_name' in quals:
if "Transmembrane" in ''.join(quals['region_name']):
start = feat.location.start.position+1
end = feat.location.end.position
tm_locs.append("%d..%d"%(start,end))
return __pd.Series({'TM_num':len(tm_locs),'TM_locs':','.join(tm_locs)}) if tm_locs else __pd.Series({'TM_num':None,'TM_locs':None})
else:
return __pd.Series({'TM_num':None,'TM_locs':None})
######################################################################################################
# THAT WHAT IS REMAINING ...
# 5. Closest TM boundaries to the g-site. Here, I want the closest TM boundary on both sides (if present).
# For example, entry 5 in the sample output has a g-site at 212 and TM spans at 133..153 and 225..245 that flank this site:
# 153 is the closest N-terminal boundary, 225 is the closest C-terminal boundary.
# For proteins with a single TM span (lines 2 and 3 of the sample output file), one of these output columns will be empty.
# A possible helper implementing this is sketched right after this comment block.
######################################################################################################
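# A possible sketch for item 5 above; this is NOT part of the original pipeline.
# The function name and its (fidx, gsite_pos) signature are assumptions; it only
# reuses the gbrecs/feature conventions of the helpers above. 'gsite_pos' is the
# 1-based g-site start position (same convention as 'gstart' further below).
def get_closest_tm_boundaries(fidx, gsite_pos):
    if __pd.notnull(fidx) and __pd.notnull(gsite_pos):
        try:
            fidx_str = str(int(fidx))
        except:
            fidx_str = str(fidx)
        gsite_pos = int(gsite_pos)
        n_term, c_term = None, None
        for feat in gbrecs[fidx_str].features:
            quals = feat.qualifiers
            if 'region_name' in quals:
                if "Transmembrane" in ''.join(quals['region_name']):
                    start = feat.location.start.position+1
                    end = feat.location.end.position
                    # TM span ending before the g-site: its end is a candidate N-terminal boundary ...
                    if end < gsite_pos and (n_term is None or end > n_term):
                        n_term = end
                    # TM span starting after the g-site: its start is a candidate C-terminal boundary ...
                    if start > gsite_pos and (c_term is None or start < c_term):
                        c_term = start
        # single-span proteins naturally leave one of the two columns empty (None) ...
        return __pd.Series({'TM_boundary_N': n_term, 'TM_boundary_C': c_term})
    else:
        return __pd.Series({'TM_boundary_N': None, 'TM_boundary_C': None})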
#######################################################################################################
########################################################################################################################
def pept_isin(row):
pept,fetchid = row
if __pd.notnull(fetchid):
try:
fidx_str = str(int(fetchid))
except:
fidx_str = str(fetchid)
prot_seq = gbrecs[fidx_str].seq
return (pept in prot_seq)
else:
        return None
##############################
########################################################################################################################
def pept_info(row):
pept,fetchid = row
if __pd.notnull(fetchid):
try:
fidx_str = str(int(fetchid))
except:
fidx_str = str(fetchid)
prot_seq = gbrecs[fidx_str].seq
# find pept in prot:
pept_found = prot_seq.find(pept)
if pept_found > -1:
# 1-based indexing right away ...
start = prot_seq.find(pept) + 1
stop = prot_seq.find(pept) + len(pept)
# because of 1-based indexing ...
if stop >= len(prot_seq):
print "peptide is at the end: ",pept,fidx_str
next_aa = None
else:
next_aa = prot_seq[stop]
################################
if start <= 1:
print "peptide is at the start: ",pept,fidx_str
prev_aa = None
else:
prev_aa = prot_seq[start-2]
# return 4 columns ...
return __pd.Series( {'start_fetched': start,
'stop_fetched': stop,
'prev_aa_fetched': prev_aa,
'next_aa_fetched': next_aa} )
else:
print "(!!!) peptide not found: ",pept,fidx_str
return __pd.Series({})
else:
print "fetchid is None for pept",pept
return __pd.Series({})
########################################################################################################################
########################################################################################################################
#################################################################
# STAGE 3 STUFF, manual gsites extraction ...
#################################################################
#
#
########################################################################################################################
__g_site = __re.compile(r'(?=(N[ACDEFGHIKLMNQRSTVWY][TS]))')
#########################################################################################
def get_theor_sites_number(prot_seq):
# find all sites ...
all_sites = [(site.start(),site.groups()[0]) for site in __g_site.finditer(prot_seq)]
N_sites = len(all_sites)
return N_sites
#########################################################################################
def get_theor_sites(prot_seq):
# BEWARE ZERO-BASED INDEXING TO 1-BASED INDEXING TRANSLATION ...
# find all sites ...
all_sites = [(site.start(),site.groups()[0]) for site in __g_site.finditer(prot_seq)]
# N_sites = len(all_sites)
return ';'.join( (gsite_seq+'(%d)'%(pos+1)) for pos,gsite_seq in all_sites) # pos+1 - indexing transition ...
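# Toy example for the sequon regex above (the middle position may be any residue
# except P; the lookahead makes overlapping sites countable). The sequence is made up:
#     get_theor_sites("MNGSANVTK")        -> 'NGS(2);NVT(6)'
#     get_theor_sites_number("MNGSANVTK") -> 2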
##################################################################################################################
#
##################################################################################################################
# looking for Deamidated sites only ...
__deamid = __re.compile('[Dd]eamidat')
def extract_deamids(mod_str,sequeon=None):
# "IADDkYnDTFWk" with modifications: "Label:13C(6)15N(2) (+8), Deamidated:18O(1) (+3), Label:13C(6)15N(2) (+8)"
mod_list = [ mod.strip() for mod in mod_str.split(',') ]
if sequeon is not None:
mod_locations = [(aa,idx+1) for idx,aa in enumerate(sequeon) if aa.islower()]
else:
mod_locations = [False for _ in mod_list]
# return pd.Series( ms.parse_spectrum_modifications(mod) for mod in mod_list if bool(deamid.search(mod)) )
# collecting results to return ...
to_return = []
for mod,mloc in zip(mod_list,mod_locations):
if bool(__deamid.search(mod)):
type_aa, gpos_pept, value = parse_spectrum_modifications(mod)
if (type_aa is None) and (gpos_pept is None) and mloc:
type_aa, gpos_pept = mloc
elif (type_aa is not None) and (gpos_pept is not None) and mloc:
assert type_aa==mloc[0]
assert gpos_pept==mloc[1]
elif (type_aa is not None) and (gpos_pept is not None) and (not mloc):
pass
else:
print "unknown case of deamidation extraction!!! mod: %s, seq: %s"%(mod_str,str(sequeon))
                __sys.exit(1)
# let's filter the aspartic ones with the value 3 right away ...
if (type_aa in ['n','N']) and (__np.abs(value-3)<0.01):
to_return.append( (type_aa, gpos_pept, value) )
return __pd.Series( to_return )
##################################################################################################################
# unroll/expand spec table to account for multiple deamid-sites/gsites per peptide ...
def unroll_by_mfunc(df,col_names,mfunc,unrolled_colname='unrolled'):
# get the column with arguments of the 'mfunc' ...
thecolumns = df[col_names]
if type(col_names) == list:
# it appears as multi-column thing right after mfunc application ...
multicol_mfunc_result = thecolumns.apply(mfunc,axis=1)
elif type(col_names) == str:
# it appears as multi-column thing right after mfunc application ...
multicol_mfunc_result = thecolumns.apply(mfunc)
else:
print "unroll_by_mfunc expected col_names as a list OR a string ..."
print "%s fails to meet these expectations. Exit"%str(col_names)
__sys.exit(1)
# stacked - multiindex is in use, level=1 index is the column name from 'multicol_mfunc_result' ...
unrolled_mindex = multicol_mfunc_result.stack()
    # index is no longer unique after the following operation ... we dropped the inner indexing part.
# IMPORTANT: indexes correspond to those from the original df (use it for merging later) ...
unrolled_origindex = __pd.DataFrame(unrolled_mindex.reset_index(level=1,drop=True),columns=[unrolled_colname,])
# merge unrolled_origindex (a single column with ambiguous index) with the original df ...
# 'unrolled_origindex' must be DataFrame to merge: Series are not mergeable for some reason ...
# the whole df is to be unrolled after the following operation.
unrolled_df = df.merge(unrolled_origindex,left_index=True,right_index=True)
#
return unrolled_df.reset_index(drop=True)
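# Usage sketch (hypothetical DataFrame/column names): expand a spectra table so
# that a peptide carrying several deamidation sites occupies one row per site:
#     unrolled = unroll_by_mfunc(spec_df, 'modifications', extract_deamids,
#                                unrolled_colname='deamid_site')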
##################################################################################################################
def deamid_to_gsite(deamid_mod, pept_pos, prot_seq):
type_aa, gpos_pept, value = deamid_mod
gpos_pept = int(gpos_pept)
pept_pos = int(pept_pos)
assert type_aa in ['n','N']
assert __np.abs(value-3)<0.01
    # 'pept_pos' - is the 1-based absolute position of the peptide in the protein ...
# 'gpos_pept' - is 1-based relative position of gsite_start_N in the peptide ...
gsite_start = pept_pos + gpos_pept-1 # 1-based coordinate ...
gsite_stop = pept_pos + gpos_pept-1 + 3-1 # 1-based coordinate ...
    # Due to slicing rules, we need [start-1:stop] to have position 'stop' included ...
gsite_seq = prot_seq[gsite_start-1:gsite_stop]
############################################################
# gstart must be 1-based for output ...
return {'gsite':"%s(%d)"%(gsite_seq,gsite_start), 'gsite_seq':gsite_seq, 'gstart':gsite_start}
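# Worked example of the coordinate arithmetic above: a deamidated N reported at
# peptide position 3, i.e. ('N', 3, 2.99), in a peptide that starts at protein
# position 10 gives gsite_start = 10 + 3 - 1 = 12 and gsite_stop = 14, so the
# returned gsite_seq is prot_seq[11:14] (the N-X-S/T triplet) and 'gsite' looks
# like "NVT(12)".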
#########################################################################################
#
####################################################################################
## Code ## Description
####################################################################################
## 200 ## The request was processed successfully.
## 400 ## Bad request. There is a problem with your input.
## 404  ## Not found. The resource you requested doesn't exist.
## 410 ## Gone. The resource you requested was removed.
## 500 ## Internal server error. Most likely a temporary problem, but if the problem persists please contact us.
## 503 ## Service not available. The server is being updated, try again later.
######################################################################################
if __name__ == "__main__":
pass
|
|
##########################################################################
#
# Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Image Engine Design nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferImage
import GafferImageTest
class SamplerTest( GafferImageTest.ImageTestCase ) :
fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" )
def testOutOfBoundsSampleModeBlack( self ) :
c = GafferImage.Constant()
c["color"].setValue( IECore.Color4f( 1 ) )
dw = c["out"]["dataWindow"].getValue();
s = GafferImage.Sampler( c["out"], "R", dw, GafferImage.Sampler.BoundingMode.Black )
# Check integer sampling.
#########################
# Pixels on corners of dataWindow should be white.
self.assertEqual( s.sample( dw.min.x, dw.min.y ), 1 )
self.assertEqual( s.sample( dw.max.x - 1, dw.min.y ), 1 )
self.assertEqual( s.sample( dw.max.x - 1, dw.max.y - 1 ), 1 )
self.assertEqual( s.sample( dw.min.x, dw.max.y - 1 ), 1 )
        # Pixels just outside the dataWindow should be black.
self.assertEqual( s.sample( dw.min.x - 1, dw.min.y - 1 ), 0 )
self.assertEqual( s.sample( dw.max.x, dw.min.y - 1 ), 0 )
self.assertEqual( s.sample( dw.max.x, dw.max.y ), 0 )
self.assertEqual( s.sample( dw.min.x - 1, dw.max.y ), 0 )
# Check interpolated sampling.
##############################
# Pixels on corners of dataWindow should be interpolating
# to black. Note that here we're sampling at the corners
# of pixels, not centers.
self.assertEqual( s.sample( float( dw.min.x ), float( dw.min.y ) ), 0.25 )
self.assertEqual( s.sample( float( dw.max.x ), float( dw.min.y ) ), 0.25 )
self.assertEqual( s.sample( float( dw.max.x ), float( dw.max.y ) ), 0.25 )
self.assertEqual( s.sample( float( dw.min.x ), float( dw.max.y ) ), 0.25 )
# Pixel centers at the corners of dataWindow should be white.
self.assertEqual( s.sample( float( dw.min.x + 0.5 ), float( dw.min.y + 0.5 ) ), 1 )
self.assertEqual( s.sample( float( dw.max.x - 0.5 ), float( dw.min.y + 0.5 ) ), 1 )
self.assertEqual( s.sample( float( dw.max.x - 0.5 ), float( dw.max.y - 0.5 ) ), 1 )
self.assertEqual( s.sample( float( dw.min.x + 0.5 ), float( dw.max.y - 0.5 ) ), 1 )
def testOutOfBoundsSampleModeClamp( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.fileName )
dw = r["out"]["dataWindow"].getValue();
s = GafferImage.Sampler( r["out"], "R", dw, GafferImage.Sampler.BoundingMode.Clamp )
# Get the exact values of the corner pixels.
bl = s.sample( dw.min.x, dw.min.y )
br = s.sample( dw.max.x - 1, dw.min.y )
tr = s.sample( dw.max.x - 1, dw.max.y - 1 )
tl = s.sample( dw.min.x, dw.max.y - 1 )
# Sample out of bounds and assert that the same value as the nearest pixel is returned.
self.assertEqual( s.sample( dw.min.x-1, dw.min.y ), bl )
self.assertEqual( s.sample( dw.min.x, dw.min.y-1 ), bl )
self.assertEqual( s.sample( dw.max.x-1, dw.max.y ), tr )
self.assertEqual( s.sample( dw.max.x, dw.max.y-1 ), tr )
self.assertEqual( s.sample( dw.min.x-1, dw.max.y-1 ), tl )
self.assertEqual( s.sample( dw.min.x, dw.max.y ), tl )
self.assertEqual( s.sample( dw.max.x, dw.min.y ), br )
self.assertEqual( s.sample( dw.max.x-1, dw.min.y-1 ), br )
def test2x2Checker( self ) :
reader = GafferImage.ImageReader()
reader["fileName"].setValue( os.path.dirname( __file__ ) + "/images/checker2x2.exr" )
# As long as the sample region includes the valid range of our image, and all
# the pixels we're going to request, it should have no effect on our sampling.
# So test with a few such ranges.
sampleRegions = [
IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( GafferImage.ImagePlug.tileSize() ) ),
IECore.Box2i( -IECore.V2i( GafferImage.ImagePlug.tileSize() ), IECore.V2i( GafferImage.ImagePlug.tileSize() ) ),
IECore.Box2i( IECore.V2i( -1 ), IECore.V2i( 4 ) ),
]
# List of positions inside and outside of the image, along
# with expected values if outside points are clamped inside.
samples = [
( IECore.V2i( 0, 0 ), 1 ),
( IECore.V2i( 1, 0 ), 0 ),
( IECore.V2i( 1, 1 ), 1 ),
( IECore.V2i( 0, 1 ), 0 ),
( IECore.V2i( -1, 0 ), 1 ),
( IECore.V2i( 2, 0 ), 0 ),
( IECore.V2i( 0, 3 ), 0 ),
( IECore.V2i( 0, -1 ), 1 ),
( IECore.V2i( 3, 3 ), 1 ),
( IECore.V2i( -1, -1 ), 1 ),
( IECore.V2i( -1, 2 ), 0 ),
( IECore.V2i( 2, 2 ), 1 ),
( IECore.V2f( 1, 1 ), 0.5 ),
]
# Assert all is as expected for all combos of region and sample.
for region in sampleRegions :
sampler = GafferImage.Sampler( reader["out"], "R", region, boundingMode = GafferImage.Sampler.BoundingMode.Clamp )
for position, value in samples :
self.assertEqual( sampler.sample( position.x, position.y ), value )
def testSampleOutsideDataWindow( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 1000, 1000 ) )
constant["color"].setValue( IECore.Color4f( 1 ) )
crop = GafferImage.Crop()
crop["in"].setInput( constant["out"] )
crop["areaSource"].setValue( crop.AreaSource.Area )
crop["area"].setValue( IECore.Box2i( IECore.V2i( 135 ), IECore.V2i( 214 ) ) )
crop["affectDisplayWindow"].setValue( False )
sampler = GafferImage.Sampler( crop["out"], "R", IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 50 ) ), boundingMode = GafferImage.Sampler.BoundingMode.Clamp )
self.assertEqual( sampler.sample( 0, 0 ), 1 )
sampler = GafferImage.Sampler( crop["out"], "R", IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 50 ) ), boundingMode = GafferImage.Sampler.BoundingMode.Black )
self.assertEqual( sampler.sample( 0, 0 ), 0 )
def testHashIncludesBlackPixels( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 1000, 1000 ) )
constant["color"].setValue( IECore.Color4f( 1 ) )
crop = GafferImage.Crop()
crop["in"].setInput( constant["out"] )
crop["areaSource"].setValue( crop.AreaSource.Area )
crop["area"].setValue( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 200 ) ) )
crop["affectDisplayWindow"].setValue( False )
crop["affectDataWindow"].setValue( False )
# Samples the whole data window
sampler1 = GafferImage.Sampler( crop["out"], "R", IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 200 ) ), boundingMode = GafferImage.Sampler.BoundingMode.Black )
# Samples the whole data window and then some.
sampler2 = GafferImage.Sampler( crop["out"], "R", IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 210 ) ), boundingMode = GafferImage.Sampler.BoundingMode.Black )
# Samples the whole data window and then some and then some more.
sampler3 = GafferImage.Sampler( crop["out"], "R", IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 220 ) ), boundingMode = GafferImage.Sampler.BoundingMode.Black )
# The hashes must take account of the additional pixels being sampled.
self.assertNotEqual( sampler1.hash(), sampler2.hash() )
self.assertNotEqual( sampler2.hash(), sampler3.hash() )
self.assertNotEqual( sampler3.hash(), sampler1.hash() )
def testClampModeWithEmptyDataWindow( self ) :
empty = self.emptyImage()
sampler = GafferImage.Sampler( empty["out"], "R", empty["out"]["format"].getValue().getDisplayWindow(), boundingMode = GafferImage.Sampler.BoundingMode.Clamp )
self.assertEqual( sampler.sample( 0, 0 ), 0.0 )
if __name__ == "__main__":
unittest.main()
|
|
from __future__ import absolute_import, print_function, division
import warnings
import six
from .headers import Headers
from .. import encoding, utils
CONTENT_MISSING = 0
if six.PY2: # pragma: nocover
_native = lambda x: x
_always_bytes = lambda x: x
else:
# While the HTTP head _should_ be ASCII, it's not uncommon for certain headers to be utf-8 encoded.
_native = lambda x: x.decode("utf-8", "surrogateescape")
_always_bytes = lambda x: utils.always_bytes(x, "utf-8", "surrogateescape")
class MessageData(utils.Serializable):
def __eq__(self, other):
if isinstance(other, MessageData):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self.__eq__(other)
def set_state(self, state):
for k, v in state.items():
if k == "headers":
v = Headers.from_state(v)
setattr(self, k, v)
def get_state(self):
state = vars(self).copy()
state["headers"] = state["headers"].get_state()
return state
@classmethod
def from_state(cls, state):
state["headers"] = Headers.from_state(state["headers"])
return cls(**state)
class Message(utils.Serializable):
def __init__(self, data):
self.data = data
def __eq__(self, other):
if isinstance(other, Message):
return self.data == other.data
return False
def __ne__(self, other):
return not self.__eq__(other)
def get_state(self):
return self.data.get_state()
def set_state(self, state):
self.data.set_state(state)
@classmethod
def from_state(cls, state):
return cls(**state)
@property
def headers(self):
"""
Message headers object
Returns:
netlib.http.Headers
"""
return self.data.headers
@headers.setter
def headers(self, h):
self.data.headers = h
@property
def content(self):
"""
The raw (encoded) HTTP message body
See also: :py:attr:`text`
"""
return self.data.content
@content.setter
def content(self, content):
self.data.content = content
if isinstance(content, bytes):
self.headers["content-length"] = str(len(content))
def expect_content(self):
raise NotImplementedError()
@property
def http_version(self):
"""
Version string, e.g. "HTTP/1.1"
"""
return _native(self.data.http_version)
@http_version.setter
def http_version(self, http_version):
self.data.http_version = _always_bytes(http_version)
@property
def timestamp_start(self):
"""
First byte timestamp
"""
return self.data.timestamp_start
@timestamp_start.setter
def timestamp_start(self, timestamp_start):
self.data.timestamp_start = timestamp_start
@property
def timestamp_end(self):
"""
Last byte timestamp
"""
return self.data.timestamp_end
@timestamp_end.setter
def timestamp_end(self, timestamp_end):
self.data.timestamp_end = timestamp_end
@property
def text(self):
"""
The decoded HTTP message body.
Decoded contents are not cached, so accessing this attribute repeatedly is relatively expensive.
.. note::
This is not implemented yet.
See also: :py:attr:`content`, :py:class:`decoded`
"""
# This attribute should be called text, because that's what requests does.
raise NotImplementedError()
@text.setter
def text(self, text):
raise NotImplementedError()
def decode(self):
"""
Decodes body based on the current Content-Encoding header, then
removes the header. If there is no Content-Encoding header, no
action is taken.
Returns:
True, if decoding succeeded.
False, otherwise.
"""
ce = self.headers.get("content-encoding")
data = encoding.decode(ce, self.content)
if data is None:
return False
self.content = data
self.headers.pop("content-encoding", None)
return True
def encode(self, e):
"""
Encodes body with the encoding e, where e is "gzip", "deflate" or "identity".
Returns:
            True, if encoding succeeded.
False, otherwise.
"""
data = encoding.encode(e, self.content)
if data is None:
return False
self.content = data
self.headers["content-encoding"] = e
return True
# Legacy
@property
def body(self): # pragma: nocover
warnings.warn(".body is deprecated, use .content instead.", DeprecationWarning)
return self.content
@body.setter
def body(self, body): # pragma: nocover
warnings.warn(".body is deprecated, use .content instead.", DeprecationWarning)
self.content = body
class decoded(object):
"""
A context manager that decodes a request or response, and then
re-encodes it with the same encoding after execution of the block.
Example:
.. code-block:: python
with decoded(request):
request.content = request.content.replace("foo", "bar")
"""
def __init__(self, message):
self.message = message
ce = message.headers.get("content-encoding")
if ce in encoding.ENCODINGS:
self.ce = ce
else:
self.ce = None
def __enter__(self):
if self.ce:
self.message.decode()
def __exit__(self, type, value, tb):
if self.ce:
self.message.encode(self.ce)
|
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/tools/docco/examples.py
import string
testannotations="""
def annotations(canvas):
from reportlab.lib.units import inch
canvas.drawString(inch, 2.5*inch,
"setAuthor, setTitle, setSubject have no visible effect")
canvas.drawString(inch, inch, "But if you are viewing this document dynamically")
canvas.drawString(inch, 0.5*inch, "please look at File/Document Info")
canvas.setAuthor("the ReportLab Team")
canvas.setTitle("ReportLab PDF Generation User Guide")
canvas.setSubject("How to Generate PDF files using the ReportLab modules")
"""
# magic function making module
test1 = """
def f(a,b):
print "it worked", a, b
return a+b
"""
test2 = """
def g(n):
if n==0: return 1
else: return n*g(n-1)
"""
testhello = """
def hello(c):
from reportlab.lib.units import inch
# move the origin up and to the left
c.translate(inch,inch)
# define a large font
c.setFont("Helvetica", 14)
# choose some colors
c.setStrokeColorRGB(0.2,0.5,0.3)
c.setFillColorRGB(1,0,1)
# draw some lines
c.line(0,0,0,1.7*inch)
c.line(0,0,1*inch,0)
# draw a rectangle
c.rect(0.2*inch,0.2*inch,1*inch,1.5*inch, fill=1)
# make text go straight up
c.rotate(90)
# change color
c.setFillColorRGB(0,0,0.77)
# say hello (note after rotate the y coord needs to be negative!)
c.drawString(0.3*inch, -inch, "Hello World")
"""
testcoords = """
def coords(canvas):
from reportlab.lib.units import inch
from reportlab.lib.colors import pink, black, red, blue, green
c = canvas
c.setStrokeColor(pink)
c.grid([inch, 2*inch, 3*inch, 4*inch], [0.5*inch, inch, 1.5*inch, 2*inch, 2.5*inch])
c.setStrokeColor(black)
c.setFont("Times-Roman", 20)
c.drawString(0,0, "(0,0) the Origin")
c.drawString(2.5*inch, inch, "(2.5,1) in inches")
c.drawString(4*inch, 2.5*inch, "(4, 2.5)")
c.setFillColor(red)
c.rect(0,2*inch,0.2*inch,0.3*inch, fill=1)
c.setFillColor(green)
c.circle(4.5*inch, 0.4*inch, 0.2*inch, fill=1)
"""
testtranslate = """
def translate(canvas):
from reportlab.lib.units import cm
canvas.translate(2.3*cm, 0.3*cm)
coords(canvas)
"""
testscale = """
def scale(canvas):
canvas.scale(0.75, 0.5)
coords(canvas)
"""
testscaletranslate = """
def scaletranslate(canvas):
from reportlab.lib.units import inch
canvas.setFont("Courier-BoldOblique", 12)
# save the state
canvas.saveState()
# scale then translate
canvas.scale(0.3, 0.5)
canvas.translate(2.4*inch, 1.5*inch)
canvas.drawString(0, 2.7*inch, "Scale then translate")
coords(canvas)
# forget the scale and translate...
canvas.restoreState()
# translate then scale
canvas.translate(2.4*inch, 1.5*inch)
canvas.scale(0.3, 0.5)
canvas.drawString(0, 2.7*inch, "Translate then scale")
coords(canvas)
"""
testmirror = """
def mirror(canvas):
from reportlab.lib.units import inch
canvas.translate(5.5*inch, 0)
canvas.scale(-1.0, 1.0)
coords(canvas)
"""
testcolors = """
def colors(canvas):
from reportlab.lib import colors
from reportlab.lib.units import inch
black = colors.black
y = x = 0; dy=inch*3/4.0; dx=inch*5.5/5; w=h=dy/2; rdx=(dx-w)/2
rdy=h/5.0; texty=h+2*rdy
canvas.setFont("Helvetica",10)
for [namedcolor, name] in (
[colors.lavenderblush, "lavenderblush"],
[colors.lawngreen, "lawngreen"],
[colors.lemonchiffon, "lemonchiffon"],
[colors.lightblue, "lightblue"],
[colors.lightcoral, "lightcoral"]):
canvas.setFillColor(namedcolor)
canvas.rect(x+rdx, y+rdy, w, h, fill=1)
canvas.setFillColor(black)
canvas.drawCentredString(x+dx/2, y+texty, name)
x = x+dx
y = y + dy; x = 0
for rgb in [(1,0,0), (0,1,0), (0,0,1), (0.5,0.3,0.1), (0.4,0.5,0.3)]:
r,g,b = rgb
canvas.setFillColorRGB(r,g,b)
canvas.rect(x+rdx, y+rdy, w, h, fill=1)
canvas.setFillColor(black)
canvas.drawCentredString(x+dx/2, y+texty, "r%s g%s b%s"%rgb)
x = x+dx
y = y + dy; x = 0
for cmyk in [(1,0,0,0), (0,1,0,0), (0,0,1,0), (0,0,0,1), (0,0,0,0)]:
c,m,y1,k = cmyk
canvas.setFillColorCMYK(c,m,y1,k)
canvas.rect(x+rdx, y+rdy, w, h, fill=1)
canvas.setFillColor(black)
canvas.drawCentredString(x+dx/2, y+texty, "c%s m%s y%s k%s"%cmyk)
x = x+dx
y = y + dy; x = 0
for gray in (0.0, 0.25, 0.50, 0.75, 1.0):
canvas.setFillGray(gray)
canvas.rect(x+rdx, y+rdy, w, h, fill=1)
canvas.setFillColor(black)
canvas.drawCentredString(x+dx/2, y+texty, "gray: %s"%gray)
x = x+dx
"""
testspumoni = """
def spumoni(canvas):
from reportlab.lib.units import inch
from reportlab.lib.colors import pink, green, brown, white
x = 0; dx = 0.4*inch
for i in range(4):
for color in (pink, green, brown):
canvas.setFillColor(color)
canvas.rect(x,0,dx,3*inch,stroke=0,fill=1)
x = x+dx
canvas.setFillColor(white)
canvas.setStrokeColor(white)
canvas.setFont("Helvetica-Bold", 85)
canvas.drawCentredString(2.75*inch, 1.3*inch, "SPUMONI")
"""
testspumoni2 = """
def spumoni2(canvas):
from reportlab.lib.units import inch
from reportlab.lib.colors import pink, green, brown, white, black
# draw the previous drawing
spumoni(canvas)
# now put an ice cream cone on top of it:
# first draw a triangle (ice cream cone)
p = canvas.beginPath()
xcenter = 2.75*inch
radius = 0.45*inch
p.moveTo(xcenter-radius, 1.5*inch)
p.lineTo(xcenter+radius, 1.5*inch)
p.lineTo(xcenter, 0)
canvas.setFillColor(brown)
canvas.setStrokeColor(black)
canvas.drawPath(p, fill=1)
# draw some circles (scoops)
y = 1.5*inch
for color in (pink, green, brown):
canvas.setFillColor(color)
canvas.circle(xcenter, y, radius, fill=1)
y = y+radius
"""
testbezier = """
def bezier(canvas):
from reportlab.lib.colors import yellow, green, red, black
from reportlab.lib.units import inch
i = inch
d = i/4
# define the bezier curve control points
x1,y1, x2,y2, x3,y3, x4,y4 = d,1.5*i, 1.5*i,d, 3*i,d, 5.5*i-d,3*i-d
# draw a figure enclosing the control points
canvas.setFillColor(yellow)
p = canvas.beginPath()
p.moveTo(x1,y1)
for (x,y) in [(x2,y2), (x3,y3), (x4,y4)]:
p.lineTo(x,y)
canvas.drawPath(p, fill=1, stroke=0)
# draw the tangent lines
canvas.setLineWidth(inch*0.1)
canvas.setStrokeColor(green)
canvas.line(x1,y1,x2,y2)
canvas.setStrokeColor(red)
canvas.line(x3,y3,x4,y4)
# finally draw the curve
canvas.setStrokeColor(black)
canvas.bezier(x1,y1, x2,y2, x3,y3, x4,y4)
"""
testbezier2 = """
def bezier2(canvas):
from reportlab.lib.colors import yellow, green, red, black
from reportlab.lib.units import inch
# make a sequence of control points
xd,yd = 5.5*inch/2, 3*inch/2
xc,yc = xd,yd
dxdy = [(0,0.33), (0.33,0.33), (0.75,1), (0.875,0.875),
(0.875,0.875), (1,0.75), (0.33,0.33), (0.33,0)]
pointlist = []
for xoffset in (1,-1):
yoffset = xoffset
for (dx,dy) in dxdy:
px = xc + xd*xoffset*dx
py = yc + yd*yoffset*dy
pointlist.append((px,py))
yoffset = -xoffset
for (dy,dx) in dxdy:
px = xc + xd*xoffset*dx
py = yc + yd*yoffset*dy
pointlist.append((px,py))
# draw tangent lines and curves
canvas.setLineWidth(inch*0.1)
while pointlist:
[(x1,y1),(x2,y2),(x3,y3),(x4,y4)] = pointlist[:4]
del pointlist[:4]
canvas.setLineWidth(inch*0.1)
canvas.setStrokeColor(green)
canvas.line(x1,y1,x2,y2)
canvas.setStrokeColor(red)
canvas.line(x3,y3,x4,y4)
# finally draw the curve
canvas.setStrokeColor(black)
canvas.bezier(x1,y1, x2,y2, x3,y3, x4,y4)
"""
testpencil = """
def pencil(canvas, text="No.2"):
from reportlab.lib.colors import yellow, red, black,white
from reportlab.lib.units import inch
u = inch/10.0
canvas.setStrokeColor(black)
canvas.setLineWidth(4)
    # draw eraser
canvas.setFillColor(red)
canvas.circle(30*u, 5*u, 5*u, stroke=1, fill=1)
# draw all else but the tip (mainly rectangles with different fills)
canvas.setFillColor(yellow)
canvas.rect(10*u,0,20*u,10*u, stroke=1, fill=1)
canvas.setFillColor(black)
canvas.rect(23*u,0,8*u,10*u,fill=1)
canvas.roundRect(14*u, 3.5*u, 8*u, 3*u, 1.5*u, stroke=1, fill=1)
canvas.setFillColor(white)
canvas.rect(25*u,u,1.2*u,8*u, fill=1,stroke=0)
canvas.rect(27.5*u,u,1.2*u,8*u, fill=1, stroke=0)
canvas.setFont("Times-Roman", 3*u)
canvas.drawCentredString(18*u, 4*u, text)
# now draw the tip
penciltip(canvas,debug=0)
# draw broken lines across the body.
canvas.setDash([10,5,16,10],0)
canvas.line(11*u,2.5*u,22*u,2.5*u)
canvas.line(22*u,7.5*u,12*u,7.5*u)
"""
testpenciltip = """
def penciltip(canvas, debug=1):
from reportlab.lib.colors import tan, black, green
from reportlab.lib.units import inch
u = inch/10.0
canvas.setLineWidth(4)
if debug:
canvas.scale(2.8,2.8) # make it big
canvas.setLineWidth(1) # small lines
canvas.setStrokeColor(black)
canvas.setFillColor(tan)
p = canvas.beginPath()
p.moveTo(10*u,0)
p.lineTo(0,5*u)
p.lineTo(10*u,10*u)
p.curveTo(11.5*u,10*u, 11.5*u,7.5*u, 10*u,7.5*u)
p.curveTo(12*u,7.5*u, 11*u,2.5*u, 9.7*u,2.5*u)
p.curveTo(10.5*u,2.5*u, 11*u,0, 10*u,0)
canvas.drawPath(p, stroke=1, fill=1)
canvas.setFillColor(black)
p = canvas.beginPath()
p.moveTo(0,5*u)
p.lineTo(4*u,3*u)
p.lineTo(5*u,4.5*u)
p.lineTo(3*u,6.5*u)
canvas.drawPath(p, stroke=1, fill=1)
if debug:
canvas.setStrokeColor(green) # put in a frame of reference
canvas.grid([0,5*u,10*u,15*u], [0,5*u,10*u])
"""
testnoteannotation = """
from reportlab.platypus.flowables import Flowable
class NoteAnnotation(Flowable):
'''put a pencil in the margin.'''
def wrap(self, *args):
return (1,10) # I take up very little space! (?)
def draw(self):
canvas = self.canv
canvas.translate(-10,-10)
canvas.rotate(180)
canvas.scale(0.2,0.2)
pencil(canvas, text="NOTE")
"""
testhandannotation = """
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import tan, green
class HandAnnotation(Flowable):
'''A hand flowable.'''
def __init__(self, xoffset=0, size=None, fillcolor=tan, strokecolor=green):
from reportlab.lib.units import inch
if size is None: size=4*inch
self.fillcolor, self.strokecolor = fillcolor, strokecolor
self.xoffset = xoffset
self.size = size
# normal size is 4 inches
self.scale = size/(4.0*inch)
def wrap(self, *args):
return (self.xoffset, self.size)
def draw(self):
canvas = self.canv
canvas.setLineWidth(6)
canvas.setFillColor(self.fillcolor)
canvas.setStrokeColor(self.strokecolor)
canvas.translate(self.xoffset+self.size,0)
canvas.rotate(90)
canvas.scale(self.scale, self.scale)
hand(canvas, debug=0, fill=1)
"""
lyrics = '''\
well she hit Net Solutions
and she registered her own .com site now
and filled it up with yahoo profile pics
she snarfed in one night now
and she made 50 million when Hugh Hefner
bought up the rights now
and she'll have fun fun fun
til her Daddy takes the keyboard away'''
lyrics = string.split(lyrics, "\n")
testtextsize = """
def textsize(canvas):
from reportlab.lib.units import inch
from reportlab.lib.colors import magenta, red
canvas.setFont("Times-Roman", 20)
canvas.setFillColor(red)
canvas.drawCentredString(2.75*inch, 2.5*inch, "Font size examples")
canvas.setFillColor(magenta)
size = 7
y = 2.3*inch
x = 1.3*inch
for line in lyrics:
canvas.setFont("Helvetica", size)
canvas.drawRightString(x,y,"%s points: " % size)
canvas.drawString(x,y, line)
y = y-size*1.2
size = size+1.5
"""
teststar = """
def star(canvas, title="Title Here", aka="Comment here.",
xcenter=None, ycenter=None, nvertices=5):
from math import pi
from reportlab.lib.units import inch
radius=inch/3.0
if xcenter is None: xcenter=2.75*inch
if ycenter is None: ycenter=1.5*inch
canvas.drawCentredString(xcenter, ycenter+1.3*radius, title)
canvas.drawCentredString(xcenter, ycenter-1.4*radius, aka)
p = canvas.beginPath()
p.moveTo(xcenter,ycenter+radius)
from math import pi, cos, sin
angle = (2*pi)*2/5.0
startangle = pi/2.0
for vertex in range(nvertices-1):
nextangle = angle*(vertex+1)+startangle
x = xcenter + radius*cos(nextangle)
y = ycenter + radius*sin(nextangle)
p.lineTo(x,y)
if nvertices==5:
p.close()
canvas.drawPath(p)
"""
testjoins = """
def joins(canvas):
from reportlab.lib.units import inch
# make lines big
canvas.setLineWidth(5)
star(canvas, "Default: mitered join", "0: pointed", xcenter = 1*inch)
canvas.setLineJoin(1)
star(canvas, "Round join", "1: rounded")
canvas.setLineJoin(2)
star(canvas, "Bevelled join", "2: square", xcenter=4.5*inch)
"""
testcaps = """
def caps(canvas):
from reportlab.lib.units import inch
# make lines big
canvas.setLineWidth(5)
star(canvas, "Default", "no projection",xcenter = 1*inch,
nvertices=4)
canvas.setLineCap(1)
star(canvas, "Round cap", "1: ends in half circle", nvertices=4)
canvas.setLineCap(2)
star(canvas, "Square cap", "2: projects out half a width", xcenter=4.5*inch,
nvertices=4)
"""
testdashes = """
def dashes(canvas):
from reportlab.lib.units import inch
# make lines big
canvas.setDash(6,3)
star(canvas, "Simple dashes", "6 points on, 3 off", xcenter = 1*inch)
canvas.setDash(1,2)
star(canvas, "Dots", "One on, two off")
canvas.setDash([1,1,3,3,1,4,4,1], 0)
star(canvas, "Complex Pattern", "[1,1,3,3,1,4,4,1]", xcenter=4.5*inch)
"""
testcustomfont1 = """
def customfont1(canvas):
# we know some glyphs are missing, suppress warnings
import reportlab.rl_config
reportlab.rl_config.warnOnMissingFontGlyphs = 0
import rl_doc_utils
from reportlab.pdfbase import pdfmetrics
afmFile, pfbFile = rl_doc_utils.getJustFontPaths()
justFace = pdfmetrics.EmbeddedType1Face(afmFile, pfbFile)
faceName = 'LettErrorRobot-Chrome' # pulled from AFM file
pdfmetrics.registerTypeFace(justFace)
justFont = pdfmetrics.Font('LettErrorRobot-Chrome',
faceName,
'WinAnsiEncoding')
pdfmetrics.registerFont(justFont)
canvas.setFont('LettErrorRobot-Chrome', 32)
canvas.drawString(10, 150, 'This should be in')
canvas.drawString(10, 100, 'LettErrorRobot-Chrome')
"""
testttffont1 = """
def ttffont1(canvas):
# we know some glyphs are missing, suppress warnings
import reportlab.rl_config
reportlab.rl_config.warnOnMissingFontGlyphs = 0
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
pdfmetrics.registerFont(TTFont('Rina', 'rina.ttf'))
from reportlab.pdfgen.canvas import Canvas
canvas.setFont('Rina', 32)
canvas.drawString(10, 150, "Some UTF-8 text encoded")
canvas.drawString(10, 100, "in the Rina TT Font!")
"""
testcursormoves1 = """
def cursormoves1(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(inch, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 14)
for line in lyrics:
textobject.textLine(line)
textobject.setFillGray(0.4)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testcursormoves2 = """
def cursormoves2(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(2, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 14)
for line in lyrics:
textobject.textOut(line)
textobject.moveCursor(14,14) # POSITIVE Y moves down!!!
textobject.setFillColorRGB(0.4,0,1)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testcharspace = """
def charspace(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(3, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 10)
charspace = 0
for line in lyrics:
textobject.setCharSpace(charspace)
textobject.textLine("%s: %s" %(charspace,line))
charspace = charspace+0.5
textobject.setFillGray(0.4)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testwordspace = """
def wordspace(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(3, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 12)
wordspace = 0
for line in lyrics:
textobject.setWordSpace(wordspace)
textobject.textLine("%s: %s" %(wordspace,line))
wordspace = wordspace+2.5
textobject.setFillColorCMYK(0.4,0,0.4,0.2)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testhorizontalscale = """
def horizontalscale(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(3, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 12)
horizontalscale = 80 # 100 is default
for line in lyrics:
textobject.setHorizScale(horizontalscale)
textobject.textLine("%s: %s" %(horizontalscale,line))
horizontalscale = horizontalscale+10
textobject.setFillColorCMYK(0.0,0.4,0.4,0.2)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testleading = """
def leading(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(3, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 14)
leading = 8
for line in lyrics:
textobject.setLeading(leading)
textobject.textLine("%s: %s" %(leading,line))
leading = leading+2.5
textobject.setFillColorCMYK(0.8,0,0,0.3)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testhand = """
def hand(canvas, debug=1, fill=0):
(startx, starty) = (0,0)
curves = [
( 0, 2), ( 0, 4), ( 0, 8), # back of hand
( 5, 8), ( 7,10), ( 7,14),
(10,14), (10,13), ( 7.5, 8), # thumb
(13, 8), (14, 8), (17, 8),
(19, 8), (19, 6), (17, 6),
(15, 6), (13, 6), (11, 6), # index, pointing
(12, 6), (13, 6), (14, 6),
(16, 6), (16, 4), (14, 4),
(13, 4), (12, 4), (11, 4), # middle
(11.5, 4), (12, 4), (13, 4),
(15, 4), (15, 2), (13, 2),
(12.5, 2), (11.5, 2), (11, 2), # ring
(11.5, 2), (12, 2), (12.5, 2),
(14, 2), (14, 0), (12.5, 0),
(10, 0), (8, 0), (6, 0), # pinky, then close
]
from reportlab.lib.units import inch
if debug: canvas.setLineWidth(6)
u = inch*0.2
p = canvas.beginPath()
p.moveTo(startx, starty)
ccopy = list(curves)
while ccopy:
[(x1,y1), (x2,y2), (x3,y3)] = ccopy[:3]
del ccopy[:3]
p.curveTo(x1*u,y1*u,x2*u,y2*u,x3*u,y3*u)
p.close()
canvas.drawPath(p, fill=fill)
if debug:
from reportlab.lib.colors import red, green
(lastx, lasty) = (startx, starty)
ccopy = list(curves)
while ccopy:
[(x1,y1), (x2,y2), (x3,y3)] = ccopy[:3]
del ccopy[:3]
canvas.setStrokeColor(red)
canvas.line(lastx*u,lasty*u, x1*u,y1*u)
canvas.setStrokeColor(green)
canvas.line(x2*u,y2*u, x3*u,y3*u)
(lastx,lasty) = (x3,y3)
"""
testhand2 = """
def hand2(canvas):
canvas.translate(20,10)
canvas.setLineWidth(3)
canvas.setFillColorRGB(0.1, 0.3, 0.9)
canvas.setStrokeGray(0.5)
hand(canvas, debug=0, fill=1)
"""
testfonts = """
def fonts(canvas):
from reportlab.lib.units import inch
text = "Now is the time for all good men to..."
x = 1.8*inch
y = 2.7*inch
for font in canvas.getAvailableFonts():
canvas.setFont(font, 10)
canvas.drawString(x,y,text)
canvas.setFont("Helvetica", 10)
canvas.drawRightString(x-10,y, font+":")
y = y-13
"""
testarcs = """
def arcs(canvas):
from reportlab.lib.units import inch
canvas.setLineWidth(4)
canvas.setStrokeColorRGB(0.8, 1, 0.6)
# draw rectangles enclosing the arcs
canvas.rect(inch, inch, 1.5*inch, inch)
canvas.rect(3*inch, inch, inch, 1.5*inch)
canvas.setStrokeColorRGB(0, 0.2, 0.4)
canvas.setFillColorRGB(1, 0.6, 0.8)
p = canvas.beginPath()
p.moveTo(0.2*inch, 0.2*inch)
p.arcTo(inch, inch, 2.5*inch,2*inch, startAng=-30, extent=135)
p.arc(3*inch, inch, 4*inch, 2.5*inch, startAng=-45, extent=270)
canvas.drawPath(p, fill=1, stroke=1)
"""
testvariousshapes = """
def variousshapes(canvas):
from reportlab.lib.units import inch
inch = int(inch)
canvas.setStrokeGray(0.5)
canvas.grid(range(0,11*inch/2,inch/2), range(0,7*inch/2,inch/2))
canvas.setLineWidth(4)
canvas.setStrokeColorRGB(0, 0.2, 0.7)
canvas.setFillColorRGB(1, 0.6, 0.8)
p = canvas.beginPath()
p.rect(0.5*inch, 0.5*inch, 0.5*inch, 2*inch)
p.circle(2.75*inch, 1.5*inch, 0.3*inch)
p.ellipse(3.5*inch, 0.5*inch, 1.2*inch, 2*inch)
canvas.drawPath(p, fill=1, stroke=1)
"""
testclosingfigures = """
def closingfigures(canvas):
from reportlab.lib.units import inch
h = inch/3.0; k = inch/2.0
canvas.setStrokeColorRGB(0.2,0.3,0.5)
canvas.setFillColorRGB(0.8,0.6,0.2)
canvas.setLineWidth(4)
p = canvas.beginPath()
for i in (1,2,3,4):
for j in (1,2):
xc,yc = inch*i, inch*j
p.moveTo(xc,yc)
p.arcTo(xc-h, yc-k, xc+h, yc+k, startAng=0, extent=60*i)
# close only the first one, not the second one
if j==1:
p.close()
canvas.drawPath(p, fill=1, stroke=1)
"""
testforms = """
def forms(canvas):
#first create a form...
canvas.beginForm("SpumoniForm")
#re-use some drawing functions from earlier
spumoni(canvas)
canvas.endForm()
#then draw it
canvas.doForm("SpumoniForm")
"""
def doctemplateillustration(canvas):
from reportlab.lib.units import inch
canvas.setFont("Helvetica", 10)
canvas.drawString(inch/4.0, 2.75*inch, "DocTemplate")
W = 4/3.0*inch
H = 2*inch
Wd = x = inch/4.0
Hd =y = inch/2.0
for name in ("two column", "chapter page", "title page"):
canvas.setFillColorRGB(0.5,1.0,1.0)
canvas.rect(x,y,W,H, fill=1)
canvas.setFillColorRGB(0,0,0)
canvas.drawString(x+inch/8, y+H-Wd, "PageTemplate")
canvas.drawCentredString(x+W/2.0, y-Wd, name)
x = x+W+Wd
canvas.saveState()
d = inch/16
dW = (W-3*d)/2.0
hD = H -2*d-Wd
canvas.translate(Wd+d, Hd+d)
for name in ("left Frame", "right Frame"):
canvas.setFillColorRGB(1.0,0.5,1.0)
canvas.rect(0,0, dW,hD, fill=1)
canvas.setFillGray(0.7)
dd= d/2.0
ddH = (hD-6*dd)/5.0
ddW = dW-2*dd
yy = dd
xx = dd
for i in range(5):
canvas.rect(xx,yy,ddW,ddH, fill=1, stroke=0)
yy = yy+ddH+dd
canvas.setFillColorRGB(0,0,0)
canvas.saveState()
canvas.rotate(90)
canvas.drawString(d,-dW/2, name)
canvas.restoreState()
canvas.translate(dW+d,0)
canvas.restoreState()
canvas.setFillColorRGB(1.0, 0.5, 1.0)
mx = Wd+W+Wd+d
my = Hd+d
mW = W-2*d
mH = H-d-Hd
canvas.rect(mx, my, mW, mH, fill=1)
canvas.rect(Wd+2*(W+Wd)+d, Hd+3*d, W-2*d, H/2.0, fill=1)
canvas.setFillGray(0.7)
canvas.rect(Wd+2*(W+Wd)+d+dd, Hd+5*d, W-2*d-2*dd, H/2.0-2*d-dd, fill=1)
xx = mx+dd
yy = my+mH/5.0
ddH = (mH-6*dd-mH/5.0)/3.0
ddW = mW - 2*dd
for i in range(3):
canvas.setFillGray(0.7)
canvas.rect(xx,yy,ddW,ddH, fill=1, stroke=1)
canvas.setFillGray(0)
canvas.drawString(xx+dd/2.0,yy+dd/2.0, "flowable %s" %(157-i))
yy = yy+ddH+dd
canvas.drawCentredString(3*Wd+2*W+W/2, Hd+H/2.0, "First Flowable")
canvas.setFont("Times-BoldItalic", 8)
canvas.setFillGray(0)
canvas.drawCentredString(mx+mW/2.0, my+mH+3*dd, "Chapter 6: Lubricants")
canvas.setFont("Times-BoldItalic", 10)
canvas.drawCentredString(3*Wd+2*W+W/2, Hd+H-H/4, "College Life")
# D = dir()
g = globals()
Dprime = {}
from types import StringType
from string import strip
for (a,b) in g.items():
if a[:4]=="test" and type(b) is StringType:
#print 'for', a
#print b
b = strip(b)
exec(b+'\n')
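# Each test* string above is executed here so that the function it defines
# (pencil, hand, star, fonts, ...) exists at module level; presumably the same
# snippet can then both be shown as example text and called to draw the
# corresponding illustration.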
platypussetup = """
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch
PAGE_HEIGHT=defaultPageSize[1]; PAGE_WIDTH=defaultPageSize[0]
styles = getSampleStyleSheet()
"""
platypusfirstpage = """
Title = "Hello world"
pageinfo = "platypus example"
def myFirstPage(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Bold',16)
canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-108, Title)
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "First Page / %s" % pageinfo)
canvas.restoreState()
"""
platypusnextpage = """
def myLaterPages(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d %s" % (doc.page, pageinfo))
canvas.restoreState()
"""
platypusgo = """
def go():
doc = SimpleDocTemplate("phello.pdf")
Story = [Spacer(1,2*inch)]
style = styles["Normal"]
for i in range(100):
bogustext = ("This is Paragraph number %s. " % i) *20
p = Paragraph(bogustext, style)
Story.append(p)
Story.append(Spacer(1,0.2*inch))
doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)
"""
if __name__=="__main__":
# then do the platypus hello world
for b in platypussetup, platypusfirstpage, platypusnextpage, platypusgo:
b = strip(b)
exec(b+'\n')
go()
|
|
#
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <niemeyer@conectiva.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import threading
import tempfile
import sys, os
import signal
import errno
import shlex
from smart.const import Enum, INSTALL, REMOVE
from smart.sorter import ElementSorter
from smart.pm import PackageManager
from smart.cache import PreRequires
from smart import sysconf, iface, _
# Part of the logic in this file was based on information found in APT.
UNPACK = Enum("UNPACK")
CONFIG = Enum("CONFIG")
PURGE = Enum("PURGE")
DEBIAN_FRONTEND = "DEBIAN_FRONTEND"
APT_LISTCHANGES_FRONTEND = "APT_LISTCHANGES_FRONTEND"
class DebSorter(ElementSorter):
def __init__(self, changeset=None):
ElementSorter.__init__(self)
if changeset:
self.setChangeSet(changeset)
def setChangeSet(self, changeset):
# Set of priorities we use in this sorter.
HIGH, MEDIUM, LOW = range(3)
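        # HIGH, MEDIUM and LOW are simply 0, 1 and 2. The priority is attached
        # to each successor relation and, presumably, tells the sorter which
        # relations it may sacrifice first when it has to break a dependency
        # cycle (compare the "advisory" and "can't enforce" remarks below).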
# XXX The organization here sucks a bit. :-( We should clean this
# up, perhaps by refactoring this code into separate methods.
self.reset()
for pkg in changeset:
op = changeset[pkg]
if op is INSTALL:
unpack = (pkg, UNPACK)
config = (pkg, CONFIG)
self.addSuccessor(unpack, config, HIGH)
else:
remove = (pkg, REMOVE)
self.addElement(remove)
# Unpacking or unconfiguring of a package must happen after
# its pre-dependencies are configured, or before they are
# unconfigured. We do the same for normal dependencies
# (non-pre) in an advisory fashion.
for req in pkg.requires:
if isinstance(req, PreRequires):
req_type_priority = MEDIUM
else:
req_type_priority = LOW
relations = []
def add_relation(pred, succ, priority=MEDIUM):
relations.append((pred, succ, priority))
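                # Relations for this requirement are only buffered here; they
                # are committed to the sorter via addSuccessor() further below,
                # once we know how the packages providing the requirement are
                # handled by the changeset.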
for prv in req.providedby:
for prvpkg in prv.packages:
if changeset.get(prvpkg) is INSTALL:
if op is INSTALL:
# reqpkg=INSTALL, prvpkg=INSTALL
# ------------------------------
# When the package requiring a dependency and
# the package providing a dependency are both
# being installed, the unpack of the dependency
# must necessarily happen before the config of
# the dependent, and in pre-depends the unpack
# of the dependent must necessarily happen
# after the config of the dependency.
add_relation((prvpkg, UNPACK), config)
add_relation((prvpkg, CONFIG), config)
add_relation((prvpkg, CONFIG), unpack,
req_type_priority)
else:
# reqpkg=REMOVE, prvpkg=INSTALL
# -----------------------------
# When the package requiring a dependency is
# being removed, and the package providing the
# dependency is being installed, the unpack
# of the dependency must necessarily happen
# before the unconfiguration of the dependent,
# and on pre-requires the configuration of the
# dependency must happen before the
# unconfiguration of the dependent.
add_relation((prvpkg, UNPACK), remove)
add_relation((prvpkg, CONFIG), remove,
req_type_priority)
elif prvpkg.installed:
if changeset.get(prvpkg) is not REMOVE:
break
if op is INSTALL:
# reqpkg=INSTALL, prvpkg=REMOVE
# ------------------------------
# When the package providing the dependency
# is being removed, it may only be used by
# the dependent package before the former is
# removed from the system. This means that
# for both dependencies and pre-dependencies
# the removal must happen before the
# configuration.
add_relation(config, (prvpkg, REMOVE))
else:
# reqpkg=REMOVE, prvpkg=REMOVE
# ------------------------------
# When both the package requiring the dependency
# and the one providing it are being removed,
# the removal of pre-dependencies must
# necessarily be done before the dependency
# removal. We can't enforce it for dependencies
# because it would easily create a cycle.
add_relation(remove, (prvpkg, REMOVE),
req_type_priority)
else:
continue
break
else:
for relation in relations:
self.addSuccessor(*relation)
if op is INSTALL:
# That's a nice trick. We put the removed package after
# the upgrading package installation. If this relation
# is broken, it means that some conflict has moved the
# upgraded package removal due to a loop. In these cases
# we remove the package before the upgrade process,
# otherwise we do the upgrade and forget about the
# removal which is after.
upgpkgs = [upgpkg for prv in pkg.provides
for upg in prv.upgradedby
for upgpkg in upg.packages]
upgpkgs.extend([prvpkg for upg in pkg.upgrades
for prv in upg.providedby
for prvpkg in prv.packages])
for upgpkg in upgpkgs:
if changeset.get(upgpkg) is REMOVE:
self.addSuccessor(unpack, (upgpkg, REMOVE), MEDIUM)
# Conflicted packages being removed must go in
# before this package's unpacking.
cnfpkgs = [prvpkg for cnf in pkg.conflicts
for prv in cnf.providedby
for prvpkg in prv.packages
if prvpkg.name != pkg.name]
cnfpkgs.extend([cnfpkg for prv in pkg.provides
for cnf in prv.conflictedby
for cnfpkg in cnf.packages
if cnfpkg.name != pkg.name])
for cnfpkg in cnfpkgs:
if changeset.get(cnfpkg) is REMOVE:
self.addSuccessor((cnfpkg, REMOVE), unpack, HIGH)
class DebPackageManager(PackageManager):
MAXPKGSPEROP = 50
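    # At most this many packages are handed to a single dpkg invocation; the
    # batching loop in commit() below groups consecutive operations of the
    # same kind until this limit is reached.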
def commit(self, changeset, pkgpaths):
prog = iface.getProgress(self, True)
prog.start()
prog.setTopic(_("Committing transaction..."))
prog.show()
# Compute upgraded packages
upgraded = {}
for pkg in changeset.keys():
if changeset[pkg] is INSTALL:
upgpkgs = [upgpkg for prv in pkg.provides
for upg in prv.upgradedby
for upgpkg in upg.packages
if upgpkg.installed]
upgpkgs.extend([prvpkg for upg in pkg.upgrades
for prv in upg.providedby
for prvpkg in prv.packages
if prvpkg.installed])
if upgpkgs:
for upgpkg in upgpkgs:
assert changeset.get(upgpkg) is REMOVE, \
"Installing %s while %s is kept?" % \
(pkg, upgpkg)
assert upgpkg not in upgraded, \
"Two packages (%s and %s) upgrading the " \
"same installed package (%s)!?" % \
(pkg, upgraded[upgpkg], upgpkg)
upgraded[upgpkg] = pkg
sorter = DebSorter(changeset)
sorted = sorter.getSorted()
prog.set(0, len(sorted))
baseargs = shlex.split(sysconf.get("dpkg", "dpkg"))
opt = sysconf.get("deb-root")
if opt:
baseargs.append("--root=%s" % opt)
opt = sysconf.get("deb-admindir")
if opt:
baseargs.append("--admindir=%s" % opt)
opt = sysconf.get("deb-instdir")
if opt:
baseargs.append("--instdir=%s" % opt)
opt = sysconf.get("deb-simulate")
if opt:
baseargs.append("--simulate")
if sysconf.get("deb-purge"):
for i in range(len(sorted)):
pkg, op = sorted[i]
if op is REMOVE and not upgraded.get(pkg):
sorted[i] = pkg, PURGE
if sysconf.get("deb-non-interactive"):
old_debian_frontend = os.environ.get(DEBIAN_FRONTEND)
old_apt_lc_frontend = os.environ.get(APT_LISTCHANGES_FRONTEND)
os.environ[DEBIAN_FRONTEND] = "noninteractive"
os.environ[APT_LISTCHANGES_FRONTEND] = "none"
baseargs.append("--force-confold")
if sysconf.get("pm-iface-output"):
output = tempfile.TemporaryFile()
else:
output = sys.stdout
print >>output
done = {}
error = None
while sorted:
pkgs = []
op = sorted[0][1]
while (sorted and sorted[0][1] is op and
len(pkgs) < self.MAXPKGSPEROP):
pkg, op = sorted.pop(0)
if op is REMOVE and upgraded.get(pkg) in done:
continue
done[pkg] = True
opname = {REMOVE: "remove", PURGE: "purge", CONFIG: "config",
UNPACK: "unpack", INSTALL: "install"}
print >>output, "[%s] %s" % (opname[op], pkg)
pkgs.append(pkg)
if not pkgs:
continue
args = baseargs[:]
if op is REMOVE:
args.append("--force-depends")
args.append("--force-remove-essential")
args.append("--remove")
elif op is PURGE:
args.append("--force-remove-essential")
args.append("--purge")
elif op is UNPACK:
args.append("--unpack")
elif op is CONFIG:
args.append("--force-depends")
args.append("--force-remove-essential")
args.append("--configure")
if op is UNPACK:
for pkg in pkgs:
args.append(pkgpaths[pkg][0])
else:
for pkg in pkgs:
args.append(pkg.name)
thread_name = threading.currentThread().getName()
if thread_name == "MainThread":
quithandler = signal.signal(signal.SIGQUIT, signal.SIG_IGN)
inthandler = signal.signal(signal.SIGINT, signal.SIG_IGN)
output.flush()
cb = DebCallback(prog, op, pkgs)
status = self.dpkg(args, output, cb)
if thread_name == "MainThread":
signal.signal(signal.SIGQUIT, quithandler)
signal.signal(signal.SIGINT, inthandler)
if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 0:
if os.WIFSIGNALED(status) and os.WTERMSIG(status):
error = _("Sub-process %s has received a "
"segmentation fault") % args[0]
elif os.WIFEXITED(status):
error = _("Sub-process %s returned an error code "
"(%d)") % (args[0], os.WEXITSTATUS(status))
else:
error = _("Sub-process %s exited unexpectedly") % args[0]
break
if output != sys.stdout:
output.flush()
output.seek(0)
data = output.read(8192)
while data:
iface.showOutput(data)
data = output.read(8192)
output.close()
if sysconf.get("deb-non-interactive"):
if old_debian_frontend is None:
del os.environ[DEBIAN_FRONTEND]
else:
os.environ[DEBIAN_FRONTEND] = old_debian_frontend
if old_apt_lc_frontend is None:
del os.environ[APT_LISTCHANGES_FRONTEND]
else:
os.environ[APT_LISTCHANGES_FRONTEND] = old_apt_lc_frontend
if error:
iface.error(error)
prog.setDone()
prog.stop()
def dpkg(self, argv, output, callback=None):
r, w = os.pipe()
pid = os.fork()
if not pid:
if output != sys.stdout:
output_fd = output.fileno()
os.dup2(output_fd, 1)
os.dup2(output_fd, 2)
if callback:
os.dup2(w, 3)
argv.insert(1, "--status-fd=3")
#print >>output, " ".join(argv)
try:
os.execvp(argv[0], argv)
except OSError, e:
output.write("%s: %s\n" % (argv[0], str(e)))
os._exit(1)
output.flush()
os.close(w)
while True:
if callback:
data = os.read(r, 8192)
while data:
callback(data)
data = os.read(r, 8192)
try:
_pid, status = os.waitpid(pid, 0)
except OSError, e:
if e.errno != errno.EINTR:
raise
else:
if _pid == pid:
break
return status
class DebCallback:
def __init__(self, prog, op, pkgs):
self.prog = prog
self.op = op
self.pkgs = {}
for pkg in pkgs:
self.pkgs[pkg.name] = pkg
self.seen = {}
def __call__(self, data):
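        # dpkg's --status-fd output consists of colon-separated lines, roughly
        # "status: <package>: <state>"; only "status" lines for packages that
        # belong to the current batch are of interest here.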
for line in data.splitlines():
what, pkgname, status = line.split(':', 2)
what = what.strip()
pkgname = pkgname.strip()
status = status.strip()
if what != "status":
return
if self.op is CONFIG and status == "unpacked": # odd duplicate
return
if not pkgname in self.pkgs:
return
op = self.op
pkg = self.pkgs[pkgname]
subkey = "%s:%s" % (op, pkgname)
if op is REMOVE:
self.prog.setSubTopic(subkey, _("Removing %s") % pkg.name)
if status == "installed" and subkey not in self.seen:
self.seen[subkey] = True
self.prog.setSub(subkey, 0, 1, 1)
elif status == "config-files" and subkey in self.seen:
del self.seen[subkey]
self.prog.setSubDone(subkey)
self.prog.show()
elif op is PURGE:
self.prog.setSubTopic(subkey, _("Purging %s") % pkg.name)
if status == "installed" and subkey not in self.seen:
self.seen[subkey] = True
self.prog.setSub(subkey, 0, 1, 1)
elif status == "not-installed" and subkey in self.seen:
del self.seen[subkey]
self.prog.setSubDone(subkey)
self.prog.show()
elif op is UNPACK:
self.prog.setSubTopic(subkey, _("Unpacking %s") % pkg.name)
if status == "half-installed" and subkey not in self.seen:
self.seen[subkey] = True
self.prog.setSub(subkey, 0, 1, 1)
elif status == "unpacked" and subkey in self.seen:
del self.seen[subkey]
self.prog.setSubDone(subkey)
self.prog.show()
elif op is CONFIG:
self.prog.setSubTopic(subkey, _("Configuring %s") % pkg.name)
if status == "half-configured" and subkey not in self.seen:
self.seen[subkey] = True
self.prog.setSub(subkey, 0, 1, 1)
elif status == "installed" and subkey in self.seen:
del self.seen[subkey]
self.prog.setSubDone(subkey)
self.prog.show()
# vim:ts=4:sw=4:et
|
|
# coding: utf-8
# #Clustering the [Enron e-mail corpus](http://www.cs.cmu.edu/~./enron/) using the Infinite Relational Model
# ---
#
# ##Let's set up our environment
# In[1]:
get_ipython().magic(u'matplotlib inline')
import pickle
import time
import itertools as it
import numpy as np
import matplotlib.pylab as plt
import matplotlib.patches as patches
from multiprocessing import cpu_count
import seaborn as sns
sns.set_context('talk')
# ###We've made a set of utilities especially for this dataset, `enron_utils`. We'll include these as well.
#
# `enron_crawler.py` downloads the data and preprocesses it as suggested by [Ishiguro et al. 2012](http://www.kecl.ntt.co.jp/as/members/ishiguro/open/2012AISTATS.pdf). The results of the script have been stored in `results.p`.
#
# ##Let's load the data and make a binary matrix to represent email communication between individuals
#
# In this matrix, $X_{i,j} = 1$ if and only if person$_{i}$ sent an email to person$_{j}$
# In[2]:
import enron_utils
with open('results.p') as fp:
communications = pickle.load(fp)
def allnames(o):
for k, v in o:
yield [k] + list(v)
names = set(it.chain.from_iterable(allnames(communications)))
names = sorted(list(names))
namemap = { name : idx for idx, name in enumerate(names) }
# In[3]:
N = len(names)
# X_{ij} = 1 iff person i sent person j an email
communications_relation = np.zeros((N, N), dtype=np.bool)
for sender, receivers in communications:
sender_id = namemap[sender]
for receiver in receivers:
receiver_id = namemap[receiver]
communications_relation[sender_id, receiver_id] = True
print "%d names in the corpus" % N
# ##Let's visualize the communication matrix
# In[4]:
labels = [i if i%20 == 0 else '' for i in xrange(N)]
sns.heatmap(communications_relation, linewidths=0, cbar=False, xticklabels=labels, yticklabels=labels)
plt.xlabel('person number')
plt.ylabel('person number')
plt.title('Email Communication Matrix')
# ##Now, let's learn the underlying clusters using the Infinite Relational Model
#
# Let's import the necessary functions from datamicroscopes
#
# ##There are 5 steps necessary in inferring a model with datamicroscopes:
# 1. define the model
# 2. load the data
# 3. initialize the model
# 4. define the runners (MCMC chains)
# 5. run the runners
# In[5]:
from microscopes.common.rng import rng
from microscopes.common.relation.dataview import numpy_dataview
from microscopes.models import bb as beta_bernoulli
from microscopes.irm.definition import model_definition
from microscopes.irm import model, runner, query
from microscopes.kernels import parallel
from microscopes.common.query import groups, zmatrix_heuristic_block_ordering, zmatrix_reorder
# ##Let's start by defining the model and loading the data
# In[6]:
defn = model_definition([N], [((0, 0), beta_bernoulli)])
views = [numpy_dataview(communications_relation)]
prng = rng()
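# A rough reading of the definition above: [N] declares a single domain with N
# people in it, and ((0, 0), beta_bernoulli) says that the binary relation
# between that domain and itself is modelled with a Beta-Bernoulli likelihood.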
# ##Next, let's initialize the model and define the runners.
#
# ##These runners are our MCMC chains. We'll use `cpu_count` to define our number of chains.
# In[ ]:
nchains = cpu_count()
latents = [model.initialize(defn, views, r=prng, cluster_hps=[{'alpha':1e-3}]) for _ in xrange(nchains)]
kc = runner.default_assign_kernel_config(defn)
runners = [runner.runner(defn, views, latent, kc) for latent in latents]
r = parallel.runner(runners)
# ##From here, we can finally run each chain of the sampler for 1000 iterations
# In[ ]:
start = time.time()
r.run(r=prng, niters=1000)
print "inference took {} seconds".format(time.time() - start)
# ##Now that we have learned our model, let's get our cluster assignments
# In[ ]:
infers = r.get_latents()
clusters = groups(infers[0].assignments(0), sort=True)
ordering = list(it.chain.from_iterable(clusters))
# ##Let's sort the communications matrix to highlight our inferred clusters
# In[ ]:
z = communications_relation.copy()
z = z[ordering]
z = z[:,ordering]
sizes = map(len, clusters)
boundaries = np.cumsum(sizes)[:-1]
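# boundaries holds the cumulative cluster sizes, i.e. the row/column indices at
# which one inferred cluster ends and the next begins; they are used below to
# draw the separating lines and the highlight rectangles.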
def cluster_with_name(clusters, name, payload=None):
ident = namemap[name]
for idx, cluster in enumerate(clusters):
if ident in cluster:
return idx, (cluster, payload)
raise ValueError("could not find name")
suspicious = [
# (k=3) in S5.3 of "Subset Infinite Relational Models"
# Identified with the green cluster
cluster_with_name(clusters, "horton-s", {"color":"#66CC66", "desc":"The pipeline/regulatory group"}),
# (k=2) in S5.3 of "Subset Infinite Relational Models"
# Identified with the orange cluster
cluster_with_name(clusters, "skilling-j", {"color":"#FF6600", "desc":"The VIP/executives group"}),
]
suspicious = dict(suspicious)
for idx, (boundary, size) in enumerate(zip(boundaries, sizes)):
if size < 5:
continue
plt.plot(range(N), boundary*np.ones(N), color='#0066CC')
plt.plot(boundary*np.ones(N), range(N), color='#0066CC')
if idx in suspicious:
rect = patches.Rectangle((boundary-size, boundary-size),
width=size, height=size, alpha=0.5, fc=suspicious[idx][1]["color"])
plt.gca().add_patch(rect)
plt.imshow(z, cmap=plt.cm.binary, interpolation='nearest')
# #We've identified two suspicious clusters. Let's look at the data to find out who these individuals are
# In[ ]:
def cluster_names(cluster):
return [names[idx] for idx in cluster]
def get_full_name(name):
return enron_utils.FULLNAMES.get(name, name)
def get_title(name):
return enron_utils.TITLES.get(name, "?")
for cluster, payload in suspicious.values():
cnames = cluster_names(cluster)
ctitles = map(get_title, cnames)
print payload["desc"]
for n, t in zip(cnames, ctitles):
print "\t", get_full_name(n), '\t\t"{}"'.format(t)
print
# #Given the uncertainty behind these latent clusters, we can visualize the variability within these assignments with a z-matrix
#
# ###Ordering the z-matrix allows us to group members of each possible cluster together
# In[ ]:
zmat = query.zmatrix(domain=0, latents=infers)
zmat = zmatrix_reorder(zmat, zmatrix_heuristic_block_ordering(zmat))
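# Each entry of the z-matrix is (roughly) the fraction of sampled latent states
# in which two people are assigned to the same cluster, so blocks of dark cells
# indicate groupings that the chains agree on.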
# In[ ]:
plt.imshow(zmat, cmap=plt.cm.binary, interpolation='nearest')
plt.xlabel('people (sorted)')
plt.ylabel('people (sorted)')
|
|
"""The tests for the androidtv platform."""
import base64
import copy
import logging
from androidtv.constants import APPS as ANDROIDTV_APPS
from androidtv.exceptions import LockNotAcquiredException
import pytest
from homeassistant.components.androidtv.media_player import (
ANDROIDTV_DOMAIN,
ATTR_COMMAND,
ATTR_DEVICE_PATH,
ATTR_LOCAL_PATH,
CONF_ADB_SERVER_IP,
CONF_ADBKEY,
CONF_APPS,
CONF_EXCLUDE_UNNAMED_APPS,
CONF_TURN_OFF_COMMAND,
CONF_TURN_ON_COMMAND,
KEYS,
SERVICE_ADB_COMMAND,
SERVICE_DOWNLOAD,
SERVICE_LEARN_SENDEVENT,
SERVICE_UPLOAD,
)
from homeassistant.components.media_player import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_SELECT_SOURCE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
)
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_CLASS,
CONF_HOST,
CONF_NAME,
CONF_PLATFORM,
EVENT_HOMEASSISTANT_STOP,
STATE_OFF,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.components.androidtv import patchers
SHELL_RESPONSE_OFF = ""
SHELL_RESPONSE_STANDBY = "1"
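# These strings mimic what the patched ADB shell returns for the device state
# query in these tests: an empty response is treated as "off" and "1" as
# "standby" (see the STATE_OFF / STATE_STANDBY assertions below).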
# Android TV device with Python ADB implementation
CONFIG_ANDROIDTV_PYTHON_ADB = {
DOMAIN: {
CONF_PLATFORM: ANDROIDTV_DOMAIN,
CONF_HOST: "127.0.0.1",
CONF_NAME: "Android TV",
CONF_DEVICE_CLASS: "androidtv",
}
}
# Android TV device with ADB server
CONFIG_ANDROIDTV_ADB_SERVER = {
DOMAIN: {
CONF_PLATFORM: ANDROIDTV_DOMAIN,
CONF_HOST: "127.0.0.1",
CONF_NAME: "Android TV",
CONF_DEVICE_CLASS: "androidtv",
CONF_ADB_SERVER_IP: "127.0.0.1",
}
}
# Fire TV device with Python ADB implementation
CONFIG_FIRETV_PYTHON_ADB = {
DOMAIN: {
CONF_PLATFORM: ANDROIDTV_DOMAIN,
CONF_HOST: "127.0.0.1",
CONF_NAME: "Fire TV",
CONF_DEVICE_CLASS: "firetv",
}
}
# Fire TV device with ADB server
CONFIG_FIRETV_ADB_SERVER = {
DOMAIN: {
CONF_PLATFORM: ANDROIDTV_DOMAIN,
CONF_HOST: "127.0.0.1",
CONF_NAME: "Fire TV",
CONF_DEVICE_CLASS: "firetv",
CONF_ADB_SERVER_IP: "127.0.0.1",
}
}
def _setup(config):
"""Perform common setup tasks for the tests."""
if CONF_ADB_SERVER_IP not in config[DOMAIN]:
patch_key = "python"
else:
patch_key = "server"
if config[DOMAIN].get(CONF_DEVICE_CLASS) != "firetv":
entity_id = "media_player.android_tv"
else:
entity_id = "media_player.fire_tv"
return patch_key, entity_id
async def _test_reconnect(hass, caplog, config):
"""Test that the error and reconnection attempts are logged correctly.
"Handles device/service unavailable. Log a warning once when
unavailable, log once when reconnected."
https://developers.home-assistant.io/docs/en/integration_quality_scale_index.html
"""
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_KEYGEN, patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
caplog.clear()
caplog.set_level(logging.WARNING)
with patchers.patch_connect(False)[patch_key], patchers.patch_shell(error=True)[
patch_key
], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
for _ in range(5):
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_UNAVAILABLE
assert len(caplog.record_tuples) == 2
assert caplog.record_tuples[0][1] == logging.ERROR
assert caplog.record_tuples[1][1] == logging.WARNING
caplog.set_level(logging.DEBUG)
with patchers.patch_connect(True)[patch_key], patchers.patch_shell(
SHELL_RESPONSE_STANDBY
)[patch_key], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_STANDBY
if patch_key == "python":
assert (
"ADB connection to 127.0.0.1:5555 successfully established"
in caplog.record_tuples[2]
)
else:
assert (
"ADB connection to 127.0.0.1:5555 via ADB server 127.0.0.1:5037 successfully established"
in caplog.record_tuples[2]
)
return True
async def _test_adb_shell_returns_none(hass, config):
"""Test the case that the ADB shell command returns `None`.
The state should be `None` and the device should be unavailable.
"""
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_KEYGEN, patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state != STATE_UNAVAILABLE
with patchers.patch_shell(None)[patch_key], patchers.patch_shell(error=True)[
patch_key
], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_UNAVAILABLE
return True
async def test_reconnect_androidtv_python_adb(hass, caplog):
"""Test that the error and reconnection attempts are logged correctly.
* Device type: Android TV
* ADB connection method: Python ADB implementation
"""
assert await _test_reconnect(hass, caplog, CONFIG_ANDROIDTV_PYTHON_ADB)
async def test_adb_shell_returns_none_androidtv_python_adb(hass):
"""Test the case that the ADB shell command returns `None`.
* Device type: Android TV
* ADB connection method: Python ADB implementation
"""
assert await _test_adb_shell_returns_none(hass, CONFIG_ANDROIDTV_PYTHON_ADB)
async def test_reconnect_firetv_python_adb(hass, caplog):
"""Test that the error and reconnection attempts are logged correctly.
* Device type: Fire TV
* ADB connection method: Python ADB implementation
"""
assert await _test_reconnect(hass, caplog, CONFIG_FIRETV_PYTHON_ADB)
async def test_adb_shell_returns_none_firetv_python_adb(hass):
"""Test the case that the ADB shell command returns `None`.
* Device type: Fire TV
* ADB connection method: Python ADB implementation
"""
assert await _test_adb_shell_returns_none(hass, CONFIG_FIRETV_PYTHON_ADB)
async def test_reconnect_androidtv_adb_server(hass, caplog):
"""Test that the error and reconnection attempts are logged correctly.
* Device type: Android TV
* ADB connection method: ADB server
"""
assert await _test_reconnect(hass, caplog, CONFIG_ANDROIDTV_ADB_SERVER)
async def test_adb_shell_returns_none_androidtv_adb_server(hass):
"""Test the case that the ADB shell command returns `None`.
* Device type: Android TV
* ADB connection method: ADB server
"""
assert await _test_adb_shell_returns_none(hass, CONFIG_ANDROIDTV_ADB_SERVER)
async def test_reconnect_firetv_adb_server(hass, caplog):
"""Test that the error and reconnection attempts are logged correctly.
* Device type: Fire TV
* ADB connection method: ADB server
"""
assert await _test_reconnect(hass, caplog, CONFIG_FIRETV_ADB_SERVER)
async def test_adb_shell_returns_none_firetv_adb_server(hass):
"""Test the case that the ADB shell command returns `None`.
* Device type: Fire TV
* ADB connection method: ADB server
"""
assert await _test_adb_shell_returns_none(hass, CONFIG_FIRETV_ADB_SERVER)
async def test_setup_with_adbkey(hass):
"""Test that setup succeeds when using an ADB key."""
config = copy.deepcopy(CONFIG_ANDROIDTV_PYTHON_ADB)
config[DOMAIN][CONF_ADBKEY] = hass.config.path("user_provided_adbkey")
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER, patchers.PATCH_ISFILE, patchers.PATCH_ACCESS:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
async def _test_sources(hass, config0):
"""Test that sources (i.e., apps) are handled correctly for Android TV and Fire TV devices."""
config = copy.deepcopy(config0)
config[DOMAIN][CONF_APPS] = {
"com.app.test1": "TEST 1",
"com.app.test3": None,
"com.app.test4": SHELL_RESPONSE_OFF,
}
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
if config[DOMAIN].get(CONF_DEVICE_CLASS) != "firetv":
patch_update = patchers.patch_androidtv_update(
"playing",
"com.app.test1",
["com.app.test1", "com.app.test2", "com.app.test3", "com.app.test4"],
"hdmi",
False,
1,
"HW5",
)
else:
patch_update = patchers.patch_firetv_update(
"playing",
"com.app.test1",
["com.app.test1", "com.app.test2", "com.app.test3", "com.app.test4"],
"HW5",
)
with patch_update:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_PLAYING
assert state.attributes["source"] == "TEST 1"
assert sorted(state.attributes["source_list"]) == ["TEST 1", "com.app.test2"]
if config[DOMAIN].get(CONF_DEVICE_CLASS) != "firetv":
patch_update = patchers.patch_androidtv_update(
"playing",
"com.app.test2",
["com.app.test2", "com.app.test1", "com.app.test3", "com.app.test4"],
"hdmi",
True,
0,
"HW5",
)
else:
patch_update = patchers.patch_firetv_update(
"playing",
"com.app.test2",
["com.app.test2", "com.app.test1", "com.app.test3", "com.app.test4"],
"HW5",
)
with patch_update:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_PLAYING
assert state.attributes["source"] == "com.app.test2"
assert sorted(state.attributes["source_list"]) == ["TEST 1", "com.app.test2"]
return True
async def test_androidtv_sources(hass):
"""Test that sources (i.e., apps) are handled correctly for Android TV devices."""
assert await _test_sources(hass, CONFIG_ANDROIDTV_ADB_SERVER)
async def test_firetv_sources(hass):
"""Test that sources (i.e., apps) are handled correctly for Fire TV devices."""
assert await _test_sources(hass, CONFIG_FIRETV_ADB_SERVER)
async def _test_exclude_sources(hass, config0, expected_sources):
"""Test that sources (i.e., apps) are handled correctly when the `exclude_unnamed_apps` config parameter is provided."""
config = copy.deepcopy(config0)
config[DOMAIN][CONF_APPS] = {
"com.app.test1": "TEST 1",
"com.app.test3": None,
"com.app.test4": SHELL_RESPONSE_OFF,
}
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
if config[DOMAIN].get(CONF_DEVICE_CLASS) != "firetv":
patch_update = patchers.patch_androidtv_update(
"playing",
"com.app.test1",
[
"com.app.test1",
"com.app.test2",
"com.app.test3",
"com.app.test4",
"com.app.test5",
],
"hdmi",
False,
1,
"HW5",
)
else:
patch_update = patchers.patch_firetv_update(
"playing",
"com.app.test1",
[
"com.app.test1",
"com.app.test2",
"com.app.test3",
"com.app.test4",
"com.app.test5",
],
"HW5",
)
with patch_update:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_PLAYING
assert state.attributes["source"] == "TEST 1"
assert sorted(state.attributes["source_list"]) == expected_sources
return True
async def test_androidtv_exclude_sources(hass):
"""Test that sources (i.e., apps) are handled correctly for Android TV devices when the `exclude_unnamed_apps` config parameter is provided as true."""
config = copy.deepcopy(CONFIG_ANDROIDTV_ADB_SERVER)
config[DOMAIN][CONF_EXCLUDE_UNNAMED_APPS] = True
assert await _test_exclude_sources(hass, config, ["TEST 1"])
async def test_firetv_exclude_sources(hass):
"""Test that sources (i.e., apps) are handled correctly for Fire TV devices when the `exclude_unnamed_apps` config parameter is provided as true."""
config = copy.deepcopy(CONFIG_FIRETV_ADB_SERVER)
config[DOMAIN][CONF_EXCLUDE_UNNAMED_APPS] = True
assert await _test_exclude_sources(hass, config, ["TEST 1"])
async def _test_select_source(hass, config0, source, expected_arg, method_patch):
"""Test that the methods for launching and stopping apps are called correctly when selecting a source."""
config = copy.deepcopy(config0)
config[DOMAIN][CONF_APPS] = {
"com.app.test1": "TEST 1",
"com.app.test3": None,
"com.youtube.test": "YouTube",
}
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
with method_patch as method_patch_:
await hass.services.async_call(
DOMAIN,
SERVICE_SELECT_SOURCE,
{ATTR_ENTITY_ID: entity_id, ATTR_INPUT_SOURCE: source},
blocking=True,
)
method_patch_.assert_called_with(expected_arg)
return True
async def test_androidtv_select_source_launch_app_id(hass):
"""Test that an app can be launched using its app ID."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"com.app.test1",
"com.app.test1",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_launch_app_name(hass):
"""Test that an app can be launched using its friendly name."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"TEST 1",
"com.app.test1",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_launch_app_id_no_name(hass):
"""Test that an app can be launched using its app ID when it has no friendly name."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"com.app.test2",
"com.app.test2",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_launch_app_hidden(hass):
"""Test that an app can be launched using its app ID when it is hidden from the sources list."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"com.app.test3",
"com.app.test3",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_overridden_app_name(hass):
"""Test that when an app name is overridden via the `apps` configuration parameter, the app is launched correctly."""
# Evidence that the default YouTube app ID will be overridden
assert "YouTube" in ANDROIDTV_APPS.values()
assert "com.youtube.test" not in ANDROIDTV_APPS
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"YouTube",
"com.youtube.test",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_stop_app_id(hass):
"""Test that an app can be stopped using its app ID."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"!com.app.test1",
"com.app.test1",
patchers.PATCH_STOP_APP,
)
async def test_androidtv_select_source_stop_app_name(hass):
"""Test that an app can be stopped using its friendly name."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"!TEST 1",
"com.app.test1",
patchers.PATCH_STOP_APP,
)
async def test_androidtv_select_source_stop_app_id_no_name(hass):
"""Test that an app can be stopped using its app ID when it has no friendly name."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"!com.app.test2",
"com.app.test2",
patchers.PATCH_STOP_APP,
)
async def test_androidtv_select_source_stop_app_hidden(hass):
"""Test that an app can be stopped using its app ID when it is hidden from the sources list."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"!com.app.test3",
"com.app.test3",
patchers.PATCH_STOP_APP,
)
async def test_firetv_select_source_launch_app_id(hass):
"""Test that an app can be launched using its app ID."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"com.app.test1",
"com.app.test1",
patchers.PATCH_LAUNCH_APP,
)
async def test_firetv_select_source_launch_app_name(hass):
"""Test that an app can be launched using its friendly name."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"TEST 1",
"com.app.test1",
patchers.PATCH_LAUNCH_APP,
)
async def test_firetv_select_source_launch_app_id_no_name(hass):
"""Test that an app can be launched using its app ID when it has no friendly name."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"com.app.test2",
"com.app.test2",
patchers.PATCH_LAUNCH_APP,
)
async def test_firetv_select_source_launch_app_hidden(hass):
"""Test that an app can be launched using its app ID when it is hidden from the sources list."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"com.app.test3",
"com.app.test3",
patchers.PATCH_LAUNCH_APP,
)
async def test_firetv_select_source_stop_app_id(hass):
"""Test that an app can be stopped using its app ID."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"!com.app.test1",
"com.app.test1",
patchers.PATCH_STOP_APP,
)
async def test_firetv_select_source_stop_app_name(hass):
"""Test that an app can be stopped using its friendly name."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"!TEST 1",
"com.app.test1",
patchers.PATCH_STOP_APP,
)
async def test_firetv_select_source_stop_app_id_no_name(hass):
"""Test that an app can be stopped using its app ID when it has no friendly name."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"!com.app.test2",
"com.app.test2",
patchers.PATCH_STOP_APP,
)
async def test_firetv_select_source_stop_hidden(hass):
"""Test that an app can be stopped using its app ID when it is hidden from the sources list."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"!com.app.test3",
"com.app.test3",
patchers.PATCH_STOP_APP,
)
async def _test_setup_fail(hass, config):
"""Test that the entity is not created when the ADB connection is not established."""
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(False)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_KEYGEN, patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is None
return True
async def test_setup_fail_androidtv(hass):
"""Test that the Android TV entity is not created when the ADB connection is not established."""
assert await _test_setup_fail(hass, CONFIG_ANDROIDTV_PYTHON_ADB)
async def test_setup_fail_firetv(hass):
"""Test that the Fire TV entity is not created when the ADB connection is not established."""
assert await _test_setup_fail(hass, CONFIG_FIRETV_PYTHON_ADB)
async def test_setup_two_devices(hass):
"""Test that two devices can be set up."""
config = {
DOMAIN: [
CONFIG_ANDROIDTV_ADB_SERVER[DOMAIN],
copy.deepcopy(CONFIG_FIRETV_ADB_SERVER[DOMAIN]),
]
}
config[DOMAIN][1][CONF_HOST] = "127.0.0.2"
patch_key = "server"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
for entity_id in ["media_player.android_tv", "media_player.fire_tv"]:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
async def test_setup_same_device_twice(hass):
"""Test that setup succeeds with a duplicated config entry."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state is not None
assert hass.services.has_service(ANDROIDTV_DOMAIN, SERVICE_ADB_COMMAND)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
async def test_adb_command(hass):
"""Test sending a command via the `androidtv.adb_command` service."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
command = "test command"
response = "test response"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_shell", return_value=response
) as patch_shell:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_ADB_COMMAND,
{ATTR_ENTITY_ID: entity_id, ATTR_COMMAND: command},
blocking=True,
)
patch_shell.assert_called_with(command)
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] == response
async def test_adb_command_unicode_decode_error(hass):
"""Test sending a command via the `androidtv.adb_command` service that raises a UnicodeDecodeError exception."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
command = "test command"
response = b"test response"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_shell",
side_effect=UnicodeDecodeError("utf-8", response, 0, len(response), "TEST"),
):
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_ADB_COMMAND,
{ATTR_ENTITY_ID: entity_id, ATTR_COMMAND: command},
blocking=True,
)
# patch_shell.assert_called_with(command)
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] is None
async def test_adb_command_key(hass):
"""Test sending a key command via the `androidtv.adb_command` service."""
patch_key = "server"
entity_id = "media_player.android_tv"
command = "HOME"
response = None
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_shell", return_value=response
) as patch_shell:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_ADB_COMMAND,
{ATTR_ENTITY_ID: entity_id, ATTR_COMMAND: command},
blocking=True,
)
patch_shell.assert_called_with(f"input keyevent {KEYS[command]}")
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] is None
async def test_adb_command_get_properties(hass):
"""Test sending the "GET_PROPERTIES" command via the `androidtv.adb_command` service."""
patch_key = "server"
entity_id = "media_player.android_tv"
command = "GET_PROPERTIES"
response = {"test key": "test value"}
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties_dict",
return_value=response,
) as patch_get_props:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_ADB_COMMAND,
{ATTR_ENTITY_ID: entity_id, ATTR_COMMAND: command},
blocking=True,
)
patch_get_props.assert_called()
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] == str(response)
async def test_learn_sendevent(hass):
"""Test the `androidtv.learn_sendevent` service."""
patch_key = "server"
entity_id = "media_player.android_tv"
response = "sendevent 1 2 3 4"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.learn_sendevent",
return_value=response,
) as patch_learn_sendevent:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_LEARN_SENDEVENT,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
patch_learn_sendevent.assert_called()
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] == response
async def test_update_lock_not_acquired(hass):
"""Test that the state does not get updated when a `LockNotAcquiredException` is raised."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
with patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.update",
side_effect=LockNotAcquiredException,
):
with patchers.patch_shell(SHELL_RESPONSE_STANDBY)[patch_key]:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
with patchers.patch_shell(SHELL_RESPONSE_STANDBY)[patch_key]:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_STANDBY
async def test_download(hass):
"""Test the `androidtv.download` service."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
device_path = "device/path"
local_path = "local/path"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
# Failed download because path is not whitelisted
with patch("androidtv.basetv.basetv_async.BaseTVAsync.adb_pull") as patch_pull:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_DOWNLOAD,
{
ATTR_ENTITY_ID: entity_id,
ATTR_DEVICE_PATH: device_path,
ATTR_LOCAL_PATH: local_path,
},
blocking=True,
)
patch_pull.assert_not_called()
# Successful download
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_pull"
) as patch_pull, patch.object(hass.config, "is_allowed_path", return_value=True):
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_DOWNLOAD,
{
ATTR_ENTITY_ID: entity_id,
ATTR_DEVICE_PATH: device_path,
ATTR_LOCAL_PATH: local_path,
},
blocking=True,
)
patch_pull.assert_called_with(local_path, device_path)
async def test_upload(hass):
"""Test the `androidtv.upload` service."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
device_path = "device/path"
local_path = "local/path"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
# Failed upload because path is not whitelisted
with patch("androidtv.basetv.basetv_async.BaseTVAsync.adb_push") as patch_push:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_UPLOAD,
{
ATTR_ENTITY_ID: entity_id,
ATTR_DEVICE_PATH: device_path,
ATTR_LOCAL_PATH: local_path,
},
blocking=True,
)
patch_push.assert_not_called()
# Successful upload
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_push"
) as patch_push, patch.object(hass.config, "is_allowed_path", return_value=True):
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_UPLOAD,
{
ATTR_ENTITY_ID: entity_id,
ATTR_DEVICE_PATH: device_path,
ATTR_LOCAL_PATH: local_path,
},
blocking=True,
)
patch_push.assert_called_with(local_path, device_path)
async def test_androidtv_volume_set(hass):
"""Test setting the volume for an Android TV device."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.set_volume_level", return_value=0.5
) as patch_set_volume_level:
await hass.services.async_call(
DOMAIN,
SERVICE_VOLUME_SET,
{ATTR_ENTITY_ID: entity_id, ATTR_MEDIA_VOLUME_LEVEL: 0.5},
blocking=True,
)
patch_set_volume_level.assert_called_with(0.5)
async def test_get_image(hass, hass_ws_client):
"""Test taking a screen capture.
This is based on `test_get_image` in tests/components/media_player/test_init.py.
"""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patchers.patch_shell("11")[patch_key]:
await hass.helpers.entity_component.async_update_entity(entity_id)
client = await hass_ws_client(hass)
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_screencap", return_value=b"image"
):
await client.send_json(
{"id": 5, "type": "media_player_thumbnail", "entity_id": entity_id}
)
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]["content_type"] == "image/png"
assert msg["result"]["content"] == base64.b64encode(b"image").decode("utf-8")
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_screencap",
side_effect=RuntimeError,
):
await client.send_json(
{"id": 6, "type": "media_player_thumbnail", "entity_id": entity_id}
)
msg = await client.receive_json()
# The device is unavailable, but getting the media image did not cause an exception
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_UNAVAILABLE
async def _test_service(
hass,
entity_id,
ha_service_name,
androidtv_method,
additional_service_data=None,
return_value=None,
):
"""Test generic Android TV media player entity service."""
service_data = {ATTR_ENTITY_ID: entity_id}
if additional_service_data:
service_data.update(additional_service_data)
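    # Pick which library class to patch based on the entity id: entities set up
    # from the Android TV configs use AndroidTVAsync, Fire TV ones use
    # FireTVAsync.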
androidtv_patch = (
"androidtv.androidtv_async.AndroidTVAsync"
if "android" in entity_id
else "firetv.firetv_async.FireTVAsync"
)
with patch(
f"androidtv.{androidtv_patch}.{androidtv_method}", return_value=return_value
) as service_call:
await hass.services.async_call(
DOMAIN,
ha_service_name,
service_data=service_data,
blocking=True,
)
assert service_call.called
async def test_services_androidtv(hass):
"""Test media player services for an Android TV device."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[patch_key]:
with patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(
hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER
)
await hass.async_block_till_done()
with patchers.patch_shell(SHELL_RESPONSE_STANDBY)[patch_key]:
await _test_service(
hass, entity_id, SERVICE_MEDIA_NEXT_TRACK, "media_next_track"
)
await _test_service(hass, entity_id, SERVICE_MEDIA_PAUSE, "media_pause")
await _test_service(hass, entity_id, SERVICE_MEDIA_PLAY, "media_play")
await _test_service(
hass, entity_id, SERVICE_MEDIA_PLAY_PAUSE, "media_play_pause"
)
await _test_service(
hass, entity_id, SERVICE_MEDIA_PREVIOUS_TRACK, "media_previous_track"
)
await _test_service(hass, entity_id, SERVICE_MEDIA_STOP, "media_stop")
await _test_service(hass, entity_id, SERVICE_TURN_OFF, "turn_off")
await _test_service(hass, entity_id, SERVICE_TURN_ON, "turn_on")
await _test_service(
hass, entity_id, SERVICE_VOLUME_DOWN, "volume_down", return_value=0.1
)
await _test_service(
hass,
entity_id,
SERVICE_VOLUME_MUTE,
"mute_volume",
{ATTR_MEDIA_VOLUME_MUTED: False},
)
await _test_service(
hass,
entity_id,
SERVICE_VOLUME_SET,
"set_volume_level",
{ATTR_MEDIA_VOLUME_LEVEL: 0.5},
0.5,
)
await _test_service(
hass, entity_id, SERVICE_VOLUME_UP, "volume_up", return_value=0.2
)
async def test_services_firetv(hass):
"""Test media player services for a Fire TV device."""
patch_key, entity_id = _setup(CONFIG_FIRETV_ADB_SERVER)
config = copy.deepcopy(CONFIG_FIRETV_ADB_SERVER)
config[DOMAIN][CONF_TURN_OFF_COMMAND] = "test off"
config[DOMAIN][CONF_TURN_ON_COMMAND] = "test on"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[patch_key]:
with patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
with patchers.patch_shell(SHELL_RESPONSE_STANDBY)[patch_key]:
await _test_service(hass, entity_id, SERVICE_MEDIA_STOP, "back")
await _test_service(hass, entity_id, SERVICE_TURN_OFF, "adb_shell")
await _test_service(hass, entity_id, SERVICE_TURN_ON, "adb_shell")
async def test_connection_closed_on_ha_stop(hass):
"""Test that the ADB socket connection is closed when HA stops."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[patch_key]:
with patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(
hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER
)
await hass.async_block_till_done()
with patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.adb_close"
) as adb_close:
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
assert adb_close.called
async def test_exception(hass):
"""Test that the ADB connection gets closed when there is an unforeseen exception.
HA will attempt to reconnect on the next update.
"""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_PYTHON_ADB)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_KEYGEN, patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_PYTHON_ADB)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
    # When an unforeseen exception occurs, we close the ADB connection and raise the exception
with patchers.PATCH_ANDROIDTV_UPDATE_EXCEPTION, pytest.raises(Exception):
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_UNAVAILABLE
# On the next update, HA will reconnect to the device
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
|
|
import warnings
from collections import defaultdict
import numpy as np
import pandas as pd
from .coding import strings, times, variables
from .coding.variables import SerializationWarning, pop_to
from .core import duck_array_ops, indexing
from .core.common import contains_cftime_datetimes
from .core.pycompat import is_duck_dask_array
from .core.variable import IndexVariable, Variable, as_variable
CF_RELATED_DATA = (
"bounds",
"grid_mapping",
"climatology",
"geometry",
"node_coordinates",
"node_count",
"part_node_count",
"interior_ring",
"cell_measures",
"formula_terms",
)
CF_RELATED_DATA_NEEDS_PARSING = (
"cell_measures",
"formula_terms",
)
class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):
"""Decode arrays on the fly from non-native to native endianness
This is useful for decoding arrays from netCDF3 files (which are all
big endian) into native endianness, so they can be used with Cython
functions, such as those found in bottleneck and pandas.
>>> x = np.arange(5, dtype=">i2")
>>> x.dtype
dtype('>i2')
>>> NativeEndiannessArray(x).dtype
dtype('int16')
>>> indexer = indexing.BasicIndexer((slice(None),))
>>> NativeEndiannessArray(x)[indexer].dtype
dtype('int16')
"""
__slots__ = ("array",)
def __init__(self, array):
self.array = indexing.as_indexable(array)
@property
def dtype(self):
return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
"""Decode arrays on the fly from integer to boolean datatype
This is useful for decoding boolean arrays from integer typed netCDF
variables.
>>> x = np.array([1, 0, 1, 1, 0], dtype="i1")
>>> x.dtype
dtype('int8')
>>> BoolTypeArray(x).dtype
dtype('bool')
>>> indexer = indexing.BasicIndexer((slice(None),))
>>> BoolTypeArray(x)[indexer].dtype
dtype('bool')
"""
__slots__ = ("array",)
def __init__(self, array):
self.array = indexing.as_indexable(array)
@property
def dtype(self):
return np.dtype("bool")
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
def _var_as_tuple(var):
return var.dims, var.data, var.attrs.copy(), var.encoding.copy()
def maybe_encode_nonstring_dtype(var, name=None):
if "dtype" in var.encoding and var.encoding["dtype"] not in ("S1", str):
dims, data, attrs, encoding = _var_as_tuple(var)
dtype = np.dtype(encoding.pop("dtype"))
if dtype != var.dtype:
if np.issubdtype(dtype, np.integer):
if (
np.issubdtype(var.dtype, np.floating)
and "_FillValue" not in var.attrs
and "missing_value" not in var.attrs
):
warnings.warn(
f"saving variable {name} with floating "
"point data as an integer dtype without "
"any _FillValue to use for NaNs",
SerializationWarning,
stacklevel=10,
)
data = duck_array_ops.around(data)[...]
data = data.astype(dtype=dtype)
var = Variable(dims, data, attrs, encoding)
return var
def maybe_default_fill_value(var):
# make NaN the fill value for float types:
if (
"_FillValue" not in var.attrs
and "_FillValue" not in var.encoding
and np.issubdtype(var.dtype, np.floating)
):
var.attrs["_FillValue"] = var.dtype.type(np.nan)
return var
def maybe_encode_bools(var):
if (
(var.dtype == bool)
and ("dtype" not in var.encoding)
and ("dtype" not in var.attrs)
):
dims, data, attrs, encoding = _var_as_tuple(var)
attrs["dtype"] = "bool"
data = data.astype(dtype="i1", copy=True)
var = Variable(dims, data, attrs, encoding)
return var
def _infer_dtype(array, name=None):
"""Given an object array with no missing values, infer its dtype from its
first element
"""
if array.dtype.kind != "O":
raise TypeError("infer_type must be called on a dtype=object array")
if array.size == 0:
return np.dtype(float)
element = array[(0,) * array.ndim]
# We use the base types to avoid subclasses of bytes and str (which might
# not play nice with e.g. hdf5 datatypes), such as those from numpy
if isinstance(element, bytes):
return strings.create_vlen_dtype(bytes)
elif isinstance(element, str):
return strings.create_vlen_dtype(str)
dtype = np.array(element).dtype
if dtype.kind != "O":
return dtype
raise ValueError(
"unable to infer dtype on variable {!r}; xarray "
"cannot serialize arbitrary Python objects".format(name)
)
def ensure_not_multiindex(var, name=None):
if isinstance(var, IndexVariable) and isinstance(var.to_index(), pd.MultiIndex):
raise NotImplementedError(
"variable {!r} is a MultiIndex, which cannot yet be "
"serialized to netCDF files "
"(https://github.com/pydata/xarray/issues/1077). Use "
"reset_index() to convert MultiIndex levels into coordinate "
"variables instead.".format(name)
)
def _copy_with_dtype(data, dtype):
"""Create a copy of an array with the given dtype.
We use this instead of np.array() to ensure that custom object dtypes end
up on the resulting array.
"""
result = np.empty(data.shape, dtype)
result[...] = data
return result
def ensure_dtype_not_object(var, name=None):
# TODO: move this from conventions to backends? (it's not CF related)
if var.dtype.kind == "O":
dims, data, attrs, encoding = _var_as_tuple(var)
if is_duck_dask_array(data):
warnings.warn(
"variable {} has data in the form of a dask array with "
"dtype=object, which means it is being loaded into memory "
"to determine a data type that can be safely stored on disk. "
"To avoid this, coerce this variable to a fixed-size dtype "
"with astype() before saving it.".format(name),
SerializationWarning,
)
data = data.compute()
missing = pd.isnull(data)
if missing.any():
# nb. this will fail for dask.array data
non_missing_values = data[~missing]
inferred_dtype = _infer_dtype(non_missing_values, name)
# There is no safe bit-pattern for NA in typical binary string
            # formats, so we can't set a fill_value. Unfortunately, this means
# we can't distinguish between missing values and empty strings.
if strings.is_bytes_dtype(inferred_dtype):
fill_value = b""
elif strings.is_unicode_dtype(inferred_dtype):
fill_value = ""
else:
# insist on using float for numeric values
if not np.issubdtype(inferred_dtype, np.floating):
inferred_dtype = np.dtype(float)
fill_value = inferred_dtype.type(np.nan)
data = _copy_with_dtype(data, dtype=inferred_dtype)
data[missing] = fill_value
else:
data = _copy_with_dtype(data, dtype=_infer_dtype(data, name))
assert data.dtype.kind != "O" or data.dtype.metadata
var = Variable(dims, data, attrs, encoding)
return var
def encode_cf_variable(var, needs_copy=True, name=None):
"""
    Converts a Variable into a Variable which follows some
    of the CF conventions:
        - NaNs are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : Variable
A variable holding un-encoded data.
Returns
-------
out : Variable
A variable which has been encoded as described above.
"""
ensure_not_multiindex(var, name=name)
for coder in [
times.CFDatetimeCoder(),
times.CFTimedeltaCoder(),
variables.CFScaleOffsetCoder(),
variables.CFMaskCoder(),
variables.UnsignedIntegerCoder(),
]:
var = coder.encode(var, name=name)
# TODO(shoyer): convert all of these to use coders, too:
var = maybe_encode_nonstring_dtype(var, name=name)
var = maybe_default_fill_value(var)
var = maybe_encode_bools(var)
var = ensure_dtype_not_object(var, name=name)
for attr_name in CF_RELATED_DATA:
pop_to(var.encoding, var.attrs, attr_name)
return var
def decode_cf_variable(
name,
var,
concat_characters=True,
mask_and_scale=True,
decode_times=True,
decode_endianness=True,
stack_char_dim=True,
use_cftime=None,
decode_timedelta=None,
):
"""
Decodes a variable which may hold CF encoded information.
This includes variables that have been masked and scaled, which
hold CF style time variables (this is almost always the case if
the dataset has been serialized) and which have strings encoded
as character arrays.
Parameters
----------
name : str
Name of the variable. Used for better error messages.
var : Variable
A variable holding potentially CF encoded information.
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ["h", "e", "l", "l", "o"] -> "hello"
mask_and_scale : bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue). If the _Unsigned attribute is present
treat integer arrays as unsigned.
decode_times : bool
Decode cf times ("hours since 2000-01-01") to np.datetime64.
decode_endianness : bool
Decode arrays from non-native to native endianness.
stack_char_dim : bool
Whether to stack characters into bytes along the last dimension of this
array. Passed as an argument because we need to look at the full
dataset to figure out if this is appropriate.
use_cftime : bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
out : Variable
A variable holding the decoded equivalent of var.
"""
var = as_variable(var)
original_dtype = var.dtype
if decode_timedelta is None:
decode_timedelta = decode_times
if concat_characters:
if stack_char_dim:
var = strings.CharacterArrayCoder().decode(var, name=name)
var = strings.EncodedStringCoder().decode(var)
if mask_and_scale:
for coder in [
variables.UnsignedIntegerCoder(),
variables.CFMaskCoder(),
variables.CFScaleOffsetCoder(),
]:
var = coder.decode(var, name=name)
if decode_timedelta:
var = times.CFTimedeltaCoder().decode(var, name=name)
if decode_times:
var = times.CFDatetimeCoder(use_cftime=use_cftime).decode(var, name=name)
dimensions, data, attributes, encoding = variables.unpack_for_decoding(var)
# TODO(shoyer): convert everything below to use coders
if decode_endianness and not data.dtype.isnative:
# do this last, so it's only done if we didn't already unmask/scale
data = NativeEndiannessArray(data)
original_dtype = data.dtype
encoding.setdefault("dtype", original_dtype)
if "dtype" in attributes and attributes["dtype"] == "bool":
del attributes["dtype"]
data = BoolTypeArray(data)
if not is_duck_dask_array(data):
data = indexing.LazilyIndexedArray(data)
return Variable(dimensions, data, attributes, encoding=encoding)
def _update_bounds_attributes(variables):
"""Adds time attributes to time bounds variables.
Variables handling time bounds ("Cell boundaries" in the CF
conventions) do not necessarily carry the necessary attributes to be
decoded. This copies the attributes from the time variable to the
associated boundaries.
See Also:
http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/
cf-conventions.html#cell-boundaries
https://github.com/pydata/xarray/issues/2565
"""
# For all time variables with bounds
for v in variables.values():
attrs = v.attrs
has_date_units = "units" in attrs and "since" in attrs["units"]
if has_date_units and "bounds" in attrs:
if attrs["bounds"] in variables:
bounds_attrs = variables[attrs["bounds"]].attrs
bounds_attrs.setdefault("units", attrs["units"])
if "calendar" in attrs:
bounds_attrs.setdefault("calendar", attrs["calendar"])
def _update_bounds_encoding(variables):
"""Adds time encoding to time bounds variables.
Variables handling time bounds ("Cell boundaries" in the CF
conventions) do not necessarily carry the necessary attributes to be
decoded. This copies the encoding from the time variable to the
associated bounds variable so that we write CF-compliant files.
See Also:
http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/
cf-conventions.html#cell-boundaries
https://github.com/pydata/xarray/issues/2565
"""
# For all time variables with bounds
for v in variables.values():
attrs = v.attrs
encoding = v.encoding
has_date_units = "units" in encoding and "since" in encoding["units"]
is_datetime_type = np.issubdtype(
v.dtype, np.datetime64
) or contains_cftime_datetimes(v)
if (
is_datetime_type
and not has_date_units
and "bounds" in attrs
and attrs["bounds"] in variables
):
warnings.warn(
"Variable '{0}' has datetime type and a "
"bounds variable but {0}.encoding does not have "
"units specified. The units encodings for '{0}' "
"and '{1}' will be determined independently "
"and may not be equal, counter to CF-conventions. "
"If this is a concern, specify a units encoding for "
"'{0}' before writing to a file.".format(v.name, attrs["bounds"]),
UserWarning,
)
if has_date_units and "bounds" in attrs:
if attrs["bounds"] in variables:
bounds_encoding = variables[attrs["bounds"]].encoding
bounds_encoding.setdefault("units", encoding["units"])
if "calendar" in encoding:
bounds_encoding.setdefault("calendar", encoding["calendar"])
def decode_cf_variables(
variables,
attributes,
concat_characters=True,
mask_and_scale=True,
decode_times=True,
decode_coords=True,
drop_variables=None,
use_cftime=None,
decode_timedelta=None,
):
"""
Decode several CF encoded variables.
See: decode_cf_variable
"""
dimensions_used_by = defaultdict(list)
for v in variables.values():
for d in v.dims:
dimensions_used_by[d].append(v)
def stackable(dim):
# figure out if a dimension can be concatenated over
if dim in variables:
return False
for v in dimensions_used_by[dim]:
if v.dtype.kind != "S" or dim != v.dims[-1]:
return False
return True
coord_names = set()
if isinstance(drop_variables, str):
drop_variables = [drop_variables]
elif drop_variables is None:
drop_variables = []
drop_variables = set(drop_variables)
# Time bounds coordinates might miss the decoding attributes
if decode_times:
_update_bounds_attributes(variables)
new_vars = {}
for k, v in variables.items():
if k in drop_variables:
continue
stack_char_dim = (
concat_characters
and v.dtype == "S1"
and v.ndim > 0
and stackable(v.dims[-1])
)
new_vars[k] = decode_cf_variable(
k,
v,
concat_characters=concat_characters,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
stack_char_dim=stack_char_dim,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
if decode_coords in [True, "coordinates", "all"]:
var_attrs = new_vars[k].attrs
if "coordinates" in var_attrs:
coord_str = var_attrs["coordinates"]
var_coord_names = coord_str.split()
if all(k in variables for k in var_coord_names):
new_vars[k].encoding["coordinates"] = coord_str
del var_attrs["coordinates"]
coord_names.update(var_coord_names)
if decode_coords == "all":
for attr_name in CF_RELATED_DATA:
if attr_name in var_attrs:
attr_val = var_attrs[attr_name]
if attr_name not in CF_RELATED_DATA_NEEDS_PARSING:
var_names = attr_val.split()
else:
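                            # Attributes such as cell_measures use "role: name"
                            # pairs (e.g. "area: areacella"), so keep every
                            # second token as a referenced variable name.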
roles_and_names = [
role_or_name
for part in attr_val.split(":")
for role_or_name in part.split()
]
if len(roles_and_names) % 2 == 1:
warnings.warn(
f"Attribute {attr_name:s} malformed", stacklevel=5
)
var_names = roles_and_names[1::2]
if all(var_name in variables for var_name in var_names):
new_vars[k].encoding[attr_name] = attr_val
coord_names.update(var_names)
else:
referenced_vars_not_in_variables = [
proj_name
for proj_name in var_names
if proj_name not in variables
]
warnings.warn(
f"Variable(s) referenced in {attr_name:s} not in variables: {referenced_vars_not_in_variables!s}",
stacklevel=5,
)
del var_attrs[attr_name]
if decode_coords and "coordinates" in attributes:
attributes = dict(attributes)
coord_names.update(attributes.pop("coordinates").split())
return new_vars, attributes, coord_names
def decode_cf(
obj,
concat_characters=True,
mask_and_scale=True,
decode_times=True,
decode_coords=True,
drop_variables=None,
use_cftime=None,
decode_timedelta=None,
):
"""Decode the given Dataset or Datastore according to CF conventions into
a new Dataset.
Parameters
----------
obj : Dataset or DataStore
Object to decode.
concat_characters : bool, optional
Should character arrays be concatenated to strings, for
example: ["h", "e", "l", "l", "o"] -> "hello"
mask_and_scale : bool, optional
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool, optional
Decode cf times (e.g., integers since "hours since 2000-01-01") to
np.datetime64.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
drop_variables : str or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
use_cftime : bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value as decode_times.
Returns
-------
decoded : Dataset
"""
from .backends.common import AbstractDataStore
from .core.dataset import Dataset
if isinstance(obj, Dataset):
vars = obj._variables
attrs = obj.attrs
extra_coords = set(obj.coords)
close = obj._close
encoding = obj.encoding
elif isinstance(obj, AbstractDataStore):
vars, attrs = obj.load()
extra_coords = set()
close = obj.close
encoding = obj.get_encoding()
else:
raise TypeError("can only decode Dataset or DataStore objects")
vars, attrs, coord_names = decode_cf_variables(
vars,
attrs,
concat_characters,
mask_and_scale,
decode_times,
decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
ds = Dataset(vars, attrs=attrs)
ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars))
ds.set_close(close)
ds.encoding = encoding
return ds
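# Hedged usage sketch (not part of the original module): it shows how decode_cf
# is typically applied to a dataset that was opened with CF decoding disabled.
# The file name "example.nc" is a hypothetical placeholder.
def _decode_cf_usage_sketch():  # pragma: no cover
    import xarray as xr  # deferred import; this module is part of xarray itself

    raw = xr.open_dataset("example.nc", decode_cf=False)
    # Decode explicitly, also promoting variables referenced by CF-related
    # attributes (grid_mapping, bounds, ...) to coordinates.
    return decode_cf(raw, decode_times=True, decode_coords="all")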
def cf_decoder(
variables,
attributes,
concat_characters=True,
mask_and_scale=True,
decode_times=True,
):
"""
Decode a set of CF encoded variables and attributes.
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ["h", "e", "l", "l", "o"] -> "hello"
mask_and_scale : bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ("hours since 2000-01-01") to np.datetime64.
Returns
-------
decoded_variables : dict
A dictionary mapping from variable name to xarray.Variable objects.
decoded_attributes : dict
A dictionary mapping from attribute name to values.
See Also
--------
decode_cf_variable
"""
variables, attributes, _ = decode_cf_variables(
variables, attributes, concat_characters, mask_and_scale, decode_times
)
return variables, attributes
def _encode_coordinates(variables, attributes, non_dim_coord_names):
# calculate global and variable specific coordinates
non_dim_coord_names = set(non_dim_coord_names)
for name in list(non_dim_coord_names):
if isinstance(name, str) and " " in name:
warnings.warn(
"coordinate {!r} has a space in its name, which means it "
"cannot be marked as a coordinate on disk and will be "
"saved as a data variable instead".format(name),
SerializationWarning,
stacklevel=6,
)
non_dim_coord_names.discard(name)
global_coordinates = non_dim_coord_names.copy()
variable_coordinates = defaultdict(set)
not_technically_coordinates = set()
for coord_name in non_dim_coord_names:
target_dims = variables[coord_name].dims
for k, v in variables.items():
if (
k not in non_dim_coord_names
and k not in v.dims
and set(target_dims) <= set(v.dims)
):
variable_coordinates[k].add(coord_name)
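            # Names referenced through CF-related attributes (bounds,
            # grid_mapping, cell_measures, ...) should not also be listed in a
            # "coordinates" attribute, so remember them for exclusion below.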
if any(
attr_name in v.encoding and coord_name in v.encoding.get(attr_name)
for attr_name in CF_RELATED_DATA
):
not_technically_coordinates.add(coord_name)
global_coordinates.discard(coord_name)
variables = {k: v.copy(deep=False) for k, v in variables.items()}
# keep track of variable names written to file under the "coordinates" attributes
written_coords = set()
for name, var in variables.items():
encoding = var.encoding
attrs = var.attrs
if "coordinates" in attrs and "coordinates" in encoding:
raise ValueError(
f"'coordinates' found in both attrs and encoding for variable {name!r}."
)
# if coordinates set to None, don't write coordinates attribute
if (
"coordinates" in attrs
and attrs.get("coordinates") is None
or "coordinates" in encoding
and encoding.get("coordinates") is None
):
# make sure "coordinates" is removed from attrs/encoding
attrs.pop("coordinates", None)
encoding.pop("coordinates", None)
continue
        # pop_to moves "coordinates" from encoding into attrs, so after the
        # next line "coordinates" is never in encoding; reading it back from
        # attrs also gives us support for a user-set attrs["coordinates"].
coords_str = pop_to(encoding, attrs, "coordinates")
if not coords_str and variable_coordinates[name]:
coordinates_text = " ".join(
str(coord_name)
for coord_name in variable_coordinates[name]
if coord_name not in not_technically_coordinates
)
if coordinates_text:
attrs["coordinates"] = coordinates_text
if "coordinates" in attrs:
written_coords.update(attrs["coordinates"].split())
# These coordinates are not associated with any particular variables, so we
# save them under a global 'coordinates' attribute so xarray can roundtrip
# the dataset faithfully. Because this serialization goes beyond CF
# conventions, only do it if necessary.
# Reference discussion:
# http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/007571.html
global_coordinates.difference_update(written_coords)
if global_coordinates:
attributes = dict(attributes)
if "coordinates" in attributes:
warnings.warn(
f"cannot serialize global coordinates {global_coordinates!r} because the global "
f"attribute 'coordinates' already exists. This may prevent faithful roundtripping"
f"of xarray datasets",
SerializationWarning,
)
else:
attributes["coordinates"] = " ".join(map(str, global_coordinates))
return variables, attributes
def encode_dataset_coordinates(dataset):
"""Encode coordinates on the given dataset object into variable specific
and global attributes.
When possible, this is done according to CF conventions.
Parameters
----------
dataset : Dataset
Object to encode.
Returns
-------
variables : dict
attrs : dict
"""
non_dim_coord_names = set(dataset.coords) - set(dataset.dims)
return _encode_coordinates(
dataset._variables, dataset.attrs, non_dim_coord_names=non_dim_coord_names
)
def cf_encoder(variables, attributes):
"""
Encode a set of CF encoded variables and attributes.
Takes a dicts of variables and attributes and encodes them
to conform to CF conventions as much as possible.
This includes masking, scaling, character array handling,
and CF-time encoding.
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xarray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See Also
--------
decode_cf_variable, encode_cf_variable
"""
# add encoding for time bounds variables if present.
_update_bounds_encoding(variables)
new_vars = {k: encode_cf_variable(v, name=k) for k, v in variables.items()}
# Remove attrs from bounds variables (issue #2921)
for var in new_vars.values():
bounds = var.attrs["bounds"] if "bounds" in var.attrs else None
if bounds and bounds in new_vars:
# see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries
for attr in [
"units",
"standard_name",
"axis",
"positive",
"calendar",
"long_name",
"leap_month",
"leap_year",
"month_lengths",
]:
if attr in new_vars[bounds].attrs and attr in var.attrs:
if new_vars[bounds].attrs[attr] == var.attrs[attr]:
new_vars[bounds].attrs.pop(attr)
return new_vars, attributes
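# Hedged sketch (not part of the original module) of the symmetry between
# cf_encoder and cf_decoder on a plain dict of Variables; the name "x" and the
# sample values are illustrative only.
def _cf_coder_roundtrip_sketch():  # pragma: no cover
    var = Variable(("time",), np.array([0.0, 1.0, np.nan]))
    # Encode as for writing to disk (fill values, dtypes, ...), then decode.
    enc_vars, enc_attrs = cf_encoder({"x": var}, {})
    dec_vars, _ = cf_decoder(enc_vars, enc_attrs)
    return dec_vars["x"]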
|
|
from matplotlib import pyplot
import numpy
class roll_state:
    # An immutable default avoids the shared mutable-default pitfall.
    def __init__(self, dice=(), score=0):
self.dice = numpy.array(dice, dtype=numpy.int32)
self.score = score
def __str__(self):
return "dice: " + str(self.dice) + " score: " + str(self.score)
def is_sequential(state):
if len(state.dice) > 1:
for i in range(len(state.dice[:-1])):
if state.dice[i] + 1 != state.dice[i + 1]:
return False
return True
def score_runs(state):
states = [state]
if len(state.dice) >= 5:
if is_sequential(state):
states.append(roll_state([], state.score + 100*len(state.dice)))
return states
def score_fives(state):
states = [state]
dice = list(state.dice)
count = 0
while (5 in dice) and (count < 2):
i = dice.index(5)
del dice[i]
count += 1
states.append(roll_state(dice, state.score + count*50))
return states
def score_ones(state):
states = [state]
dice = list(state.dice)
count = 0
while (1 in dice) and (count < 2):
i = dice.index(1)
del dice[i]
count += 1
states.append(roll_state(dice, state.score + count*100))
return states
def score_nofakind(state):
counter = {}
for die in state.dice:
if die in counter:
counter[die] += 1
else:
counter[die] = 1
new_state = roll_state(state.dice, state.score)
use_new_state = False
for key, value in counter.items():
if value > 2:
use_new_state = True
new_state.dice = [die for die in new_state.dice if die != key]
if key == 1:
new_state.score = new_state.score + 1000*2**(value - 3)
else:
new_state.score = new_state.score + 100*key*2**(value - 3)
if use_new_state:
return [state, new_state]
else:
return [state]
def get_options(state):
states = score_runs(state)
temp_states = []
for state in states:
temp_states = temp_states + score_fives(state)
states = temp_states
temp_states = []
for state in states:
temp_states = temp_states + score_ones(state)
states = temp_states
temp_states = []
for state in states:
temp_states = temp_states + score_nofakind(state)
states = temp_states
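    # Drop states[0] (the original, unscored state) so that "keep nothing" is
    # never offered; an empty result is treated as a zilch by the caller.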
return states[1:]
def greedy_stgy():
def choose_dice(state):
states = sorted(get_options(state), key=lambda obj: obj.score)
if len(states) > 0:
return states[-1]
else:
return None
return choose_dice
def max_dice_stgy(exp_low=49, exp_high=101, max_dice=6):
def choose_dice(state):
def srt(obj):
if len(obj.dice) == 0:
# Assume moderate score next roll
return obj.score + exp_low*max_dice
elif len(obj.dice) < 3:
# Take anything
return obj.score + exp_low*len(obj.dice)
else:
# Take better than 100 per die
return obj.score + exp_high*len(obj.dice)
states = sorted(get_options(state), key=srt)
if len(states) > 0:
return states[-1]
else:
return None
return choose_dice
def score_cap_stgy(max_score=400):
def stop_turn(num_dice, score):
if score >= max_score:
return True
else:
return False
return stop_turn
def num_dice_stgy(min_dice=2):
def stop_turn(num_dice, score):
if num_dice <= min_dice:
return True
else:
return False
return stop_turn
def exp_gain_stgy(die_gain=100):
def stop_turn(num_dice, score):
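        # A single die scores with probability 1/3 (a 1 or a 5), so the chance
        # that a roll of num_dice dice scores at all is 1 - (2/3)**num_dice.
        # Stop once the expected value of rolling again (assuming roughly
        # die_gain points per remaining die) falls below the banked score.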
if (score + die_gain*num_dice)*(1.0 - (2.0/3.0)**num_dice) < score:
return True
else:
return False
return stop_turn
def composite_stgy(min_score=400, min_dice=2):
def stop_turn(num_dice, score):
if (score >= min_score) and (num_dice <= min_dice):
return True
else:
return False
return stop_turn
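# The "n"-prefixed strategies below are (roughly) negated stop conditions. In
# the simulations at the bottom of this file they fill the third strategy slot,
# which decides whether to decline a roll carried over from the previous player
# and start the turn fresh instead.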
def ncomposite_stgy(min_score=400, min_dice=2):
def stop_turn(num_dice, score):
if (score < min_score) or (num_dice < min_dice):
return True
else:
return False
return stop_turn
def nscore_cap_stgy(max_score=400):
def stop_turn(num_dice, score):
if score < max_score:
return True
else:
return False
return stop_turn
class zilch:
def __init__(self, players, additive=True):
# players is dict of distinct players
self.players = players
self.players_order = [player for player in self.players.values()]
self.additive = additive
self.end_condition = 10000
self.tie_breaker = 5000
numpy.random.shuffle(self.players_order)
def play_game(self):
roll = (None, 0) # Null roll
end_condition = self.end_condition
while True:
end_game = False
for player in self.players_order:
if self.additive:
roll = player.take_turn(roll)
else:
roll = player.take_turn()
if player.score >= end_condition:
end_game = True
if end_game:
scores = [player.score for player in self.players_order]
if len(scores) > 1:
if scores[-1] == scores[-2]:
end_condition += self.tie_breaker
continue
                        # break tie
break
                # end game
return self.players
class zilch_player:
def __init__(self, num_dice = 6, strategy=None):
self.score = 0
self.num_dice = num_dice
if strategy is None:
self.strategy = (greedy_stgy(), score_cap_stgy(), num_dice_stgy())
else:
self.strategy = strategy
def take_turn(self, roll=(None, 0)):
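        # roll is a (dice_available, banked_score) pair; in the additive game it
        # is handed over from the previous player, and (None, 0) means start a
        # fresh turn with all dice. strategy[2] decides whether to decline a
        # carried-over roll.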
if roll[0] is None:
roll = (self.num_dice, 0)
else:
if self.strategy[2](roll[0], roll[1]):
roll = (self.num_dice, 0)
while True:
roll = self.roll(roll[0], roll[1])
if roll[0] is None:
break
                # zilch
if roll[0] == 0:
roll = (self.num_dice, roll[1])
if self.strategy[1](roll[0], roll[1]):
break
                # scored
self.score += roll[1]
return roll
def roll(self, num_dice, score=0):
# num_dice random integers from 1 to 6
dice = numpy.sort(numpy.random.randint(1, 7, (num_dice)))
state = roll_state(dice)
state = self.strategy[0](state)
if state is not None:
return len(state.dice), state.score + score
else:
return None, 0
data = []
for i in range(10000):
# me = zilch_player(strategy=(max_dice_stgy(), composite_stgy(500, 2), ncomposite_stgy(1000, 2)))
me = zilch_player(strategy=(greedy_stgy(), composite_stgy(300, 2), ncomposite_stgy(600, 2)))
you = zilch_player(strategy=(greedy_stgy(), exp_gain_stgy(50), ncomposite_stgy(600, 2)))
game = zilch({"me":me, "you":you}, additive=True)
players = game.play_game()
data.append([players["me"].score, players["you"].score])
# data.append(game.play_game())
data = numpy.array(data)
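# 1 if "me" won (strictly higher final score) else 0; the reported error is the
# standard error of the mean of this Bernoulli win/loss sample.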
data = numpy.array([1 if x>y else 0 for x, y in data])
print("me win rate: " + str(numpy.mean(data)))
print("error: " + str(numpy.std(data)/numpy.sqrt(len(data))))
|
|
"""Manage images.
We make an important design decision: Importing images requires root
privilege; unlike rkt, which does not. We make this decision for the
simplicity of implementation. To not require root privilege, rkt has to
split import into two steps:
* The first step, the ``fetch`` command, merely copies a tar archive to
the image repository (after optionally verifying archive's signature).
This step does not require root privilege given that the image
repository's directory write permission is properly configured.
* The second step, the ``prepare`` command, extracts the tar archive.
This step requires root privilege to create files extracted from the
tar archive that are owned by root.
In the future we might adopt rkt's design; for now, we trade security
for implementation simplicity.
Image repository layout:
* Under ``images`` there are three top-level directories: trees, tags,
and tmp.
* ``trees`` is the directory of extracted tar archives.
* ``trees/<sha512>`` is the directory of an image, where ``sha512`` is
the SHA512 of the tar archive.
* ``trees/<sha512>/metadata`` stores image metadata in JSON format.
* ``trees/<sha512>/rootfs`` is the root directory of image.
* ``tags`` is a directory of symlinks to images under ``trees``.
* ``tmp`` is a scratchpad for extracting the tar archive. After the
extraction is completed, the output is moved into the ``trees``
directory.
"""
__all__ = [
# Public interface.
'ImageMetadata',
# Expose to apps.
'IMAGE_LIST_STRINGIFIERS',
'cmd_build_image',
'cmd_cleanup',
'cmd_import',
'cmd_init',
'cmd_list',
'cmd_remove',
'cmd_remove_tag',
'cmd_tag',
'make_select_image_kwargs',
# Expose to builders, pods, and xars.
'add_ref',
'build_image',
'find_id',
'find_name_and_version',
'get_image_dir_path',
'get_rootfs_path',
'get_trees_path',
'read_metadata',
'select_image_arguments',
'touch',
]
import contextlib
import dataclasses
import datetime
import gzip
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
from pathlib import Path
import g1.files
from g1 import scripts
from g1.bases import argparses
from g1.bases import datetimes
from g1.bases import functionals
from g1.bases import oses
from g1.bases.assertions import ASSERT
from g1.files import locks
from g1.texts import jsons
from g1.texts.columns import argparses as columns_argparses
from . import bases
from . import models
LOG = logging.getLogger(__name__)
#
# Data type.
#
@dataclasses.dataclass(frozen=True)
class ImageMetadata:
name: str
version: str
def __post_init__(self):
models.validate_image_name(self.name)
models.validate_image_version(self.version)
#
# Top-level commands. You need to check root privilege and acquire all
# file locks here.
#
# NOTE: When locking multiple top-level directories, lock them in
# alphabetical order to avoid deadlock.
#
# TODO: For now our locking strategy is very naive - we simply lock the
# top-level directory. If this turns out to cause a lot of lock
# contention, we should implement a finer-grained locking strategy.
#
select_image_arguments = functionals.compose(
argparses.begin_mutually_exclusive_group(required=True),
argparses.argument(
'--id',
type=models.validate_image_id,
help='provide image id',
),
argparses.argument(
'--nv',
metavar=('NAME', 'VERSION'),
# Sadly it looks like you can't use ``type`` with ``nargs``.
nargs=2,
help='provide image name and version',
),
argparses.argument(
'--tag',
type=models.validate_image_tag,
help='provide image tag',
),
argparses.end,
)
image_output_arguments = functionals.compose(
argparses.argument(
'name',
type=models.validate_image_name,
help='provide output image name',
),
argparses.argument(
'version',
type=models.validate_image_version,
help='provide output image version',
),
argparses.argument(
'output',
type=Path,
help='provide output image path',
),
)
def make_select_image_kwargs(args):
return {
'image_id': args.id,
'name': models.validate_image_name(args.nv[0]) if args.nv else None,
'version':
models.validate_image_version(args.nv[1]) if args.nv else None,
'tag': args.tag,
}
def cmd_init():
"""Initialize the image repository."""
# For _extract_image.
scripts.assert_command_exist('tar')
# For build_image.
scripts.check_command_exist('tar')
oses.assert_root_privilege()
bases.make_dir(_get_image_repo_path(), 0o750, bases.chown_app)
bases.make_dir(_get_tags_path(), 0o750, bases.chown_app)
bases.make_dir(_get_tmp_path(), 0o750, bases.chown_app)
bases.make_dir(get_trees_path(), 0o750, bases.chown_app)
@argparses.begin_parser('build', **argparses.make_help_kwargs('build image'))
@argparses.argument(
'--rootfs',
type=Path,
required=True,
help='provide rootfs path',
)
@image_output_arguments
@argparses.end
def cmd_build_image(name, version, rootfs_path, output_path):
# Although root privilege is not required, most likely you need it
# to finish this.
ASSERT.predicate(rootfs_path, Path.is_dir)
build_image(
ImageMetadata(name=name, version=version),
lambda dst_path: bases.rsync_copy(rootfs_path, dst_path),
output_path,
)
@argparses.begin_parser(
'import', **argparses.make_help_kwargs('import an image archive')
)
@argparses.argument(
'--tag', type=models.validate_image_tag, help='provide new image tag'
)
@argparses.argument(
'path', type=Path, help='import image archive from this path'
)
@argparses.end
def cmd_import(image_archive_path, *, tag=None):
"""Import an image archive into the repo.
This is a no-op if the image has been imported (i.e., an image in
the repo has the same ID).
For images having the same name and version, it is an error to have
different IDs.
"""
oses.assert_root_privilege()
ASSERT.predicate(image_archive_path, Path.is_file)
with _using_tmp() as tmp_path:
image_id = _extract_image(image_archive_path, tmp_path)
LOG.info('import image id: %s', image_id)
_setup_image_dir(tmp_path)
# Make sure that for every newly-imported image, its last
# updated time is set to now; or else it could be cleaned up
# right after import.
_touch_image_dir(tmp_path)
with contextlib.ExitStack() as stack:
if tag:
stack.enter_context(
locks.acquiring_exclusive(_get_tags_path())
)
stack.enter_context(locks.acquiring_exclusive(get_trees_path()))
if not _maybe_import_image_dir(tmp_path, image_id):
return
if tag:
image_dir_path = get_image_dir_path(image_id)
try:
_tag_image(tag, image_dir_path)
except:
LOG.error('cannot tag image; revert import')
if not _maybe_remove_image_dir(image_dir_path):
LOG.error('cannot revert import')
raise
_IMAGE_LIST_COLUMNS = frozenset((
'id',
'name',
'version',
'tags',
'ref-count',
'last-updated',
'rootfs',
))
_IMAGE_LIST_DEFAULT_COLUMNS = (
'id',
'name',
'version',
'tags',
'ref-count',
'last-updated',
)
IMAGE_LIST_STRINGIFIERS = {
'tags': ' '.join,
'last-updated': datetime.datetime.isoformat,
}
ASSERT.issuperset(_IMAGE_LIST_COLUMNS, _IMAGE_LIST_DEFAULT_COLUMNS)
ASSERT.issuperset(_IMAGE_LIST_COLUMNS, IMAGE_LIST_STRINGIFIERS)
@argparses.begin_parser('list', **argparses.make_help_kwargs('list images'))
@columns_argparses.columnar_arguments(
_IMAGE_LIST_COLUMNS, _IMAGE_LIST_DEFAULT_COLUMNS
)
@argparses.end
def cmd_list():
# Don't need root privilege here.
with locks.acquiring_shared(_get_tags_path()), \
locks.acquiring_shared(get_trees_path()):
for image_dir_path, metadata in _iter_metadatas():
image_id = _get_id(image_dir_path)
last_updated = _get_last_updated(image_dir_path)
yield {
'id': image_id,
'name': metadata.name,
'version': metadata.version,
'tags': _find_tags(image_id),
'ref-count': _get_ref_count(image_dir_path),
'last-updated': last_updated,
'rootfs': get_rootfs_path(image_dir_path),
}
@argparses.begin_parser(
'tag', **argparses.make_help_kwargs('set tag to an image')
)
@select_image_arguments
@argparses.argument(
'new_tag', type=models.validate_image_tag, help='provide new image tag'
)
@argparses.end
def cmd_tag(*, image_id=None, name=None, version=None, tag=None, new_tag):
oses.assert_root_privilege()
with locks.acquiring_exclusive(_get_tags_path()):
with locks.acquiring_shared(get_trees_path()):
image_dir_path = ASSERT.not_none(
_find_image_dir_path(image_id, name, version, tag)
)
_tag_image(new_tag, image_dir_path)
@argparses.begin_parser(
'remove-tag', **argparses.make_help_kwargs('remove tag from an image')
)
@argparses.argument(
'tag',
type=models.validate_image_tag,
help='provide image tag for removal',
)
@argparses.end
def cmd_remove_tag(tag):
oses.assert_root_privilege()
with locks.acquiring_exclusive(_get_tags_path()):
try:
_get_tag_path(tag).unlink()
except FileNotFoundError:
pass
@argparses.begin_parser(
'remove',
**argparses.make_help_kwargs('remove an image from the repository'),
)
@argparses.argument(
'--skip-active',
action=argparses.StoreBoolAction,
default=False,
help='skip removing active image (default: %(default_string)s)',
)
@select_image_arguments
@argparses.end
def cmd_remove(
*, image_id=None, name=None, version=None, tag=None, skip_active=False
):
"""Remove an image, or no-op if image does not exist."""
oses.assert_root_privilege()
with locks.acquiring_exclusive(_get_tags_path()), \
locks.acquiring_exclusive(get_trees_path()):
image_dir_path = _find_image_dir_path(image_id, name, version, tag)
if image_dir_path:
ASSERT.true(_maybe_remove_image_dir(image_dir_path) or skip_active)
else:
LOG.debug(
'image does not exist: image_id=%s, nv=%s:%s, tag=%s',
image_id, name, version, tag
)
@argparses.begin_parser(
'cleanup', **argparses.make_help_kwargs('clean up image repository')
)
@bases.grace_period_arguments
@argparses.end
def cmd_cleanup(expiration):
oses.assert_root_privilege()
with locks.acquiring_exclusive(_get_tmp_path()):
_cleanup_tmp()
with locks.acquiring_exclusive(_get_tags_path()), \
locks.acquiring_exclusive(get_trees_path()):
_cleanup_trees(expiration)
_cleanup_tags()
#
# Locking strategy.
#
@contextlib.contextmanager
def _using_tmp():
tmp_dir_path = _get_tmp_path()
tmp_path = None
tmp_lock = None
with locks.acquiring_exclusive(tmp_dir_path):
try:
tmp_path = Path(tempfile.mkdtemp(dir=tmp_dir_path))
tmp_lock = locks.FileLock(tmp_path)
tmp_lock.acquire_exclusive()
except:
if tmp_path:
g1.files.remove(tmp_path)
if tmp_lock:
tmp_lock.release()
tmp_lock.close()
raise
try:
yield tmp_path
finally:
g1.files.remove(tmp_path)
tmp_lock.release()
tmp_lock.close()
#
# Repo layout.
#
_IMAGES = 'images'
_TAGS = 'tags'
_TREES = 'trees'
_TMP = 'tmp'
_METADATA = 'metadata'
_ROOTFS = 'rootfs'
def _get_image_repo_path():
return bases.get_repo_path() / _IMAGES
def _get_tags_path():
return _get_image_repo_path() / _TAGS
def get_trees_path():
return _get_image_repo_path() / _TREES
def _get_tmp_path():
return _get_image_repo_path() / _TMP
def get_image_dir_path(image_id):
return get_trees_path() / models.validate_image_id(image_id)
def _get_id(image_dir_path):
return models.validate_image_id(image_dir_path.name)
def _get_metadata_path(image_dir_path):
return image_dir_path / _METADATA
def get_rootfs_path(image_dir_path):
return image_dir_path / _ROOTFS
def _get_tag_path(tag):
return _get_tags_path() / models.validate_image_tag(tag)
def _get_tag(tag_path):
return models.validate_image_tag(tag_path.name)
def _get_tag_target(image_dir_path):
return Path('..') / _TREES / _get_id(image_dir_path)
#
# Functions below require caller acquiring locks.
#
#
# Top-level directories.
#
def _cleanup_tmp():
for tmp_path in _get_tmp_path().iterdir():
if not tmp_path.is_dir():
LOG.info('remove unknown temporary file: %s', tmp_path)
tmp_path.unlink()
continue
tmp_lock = locks.try_acquire_exclusive(tmp_path)
if not tmp_lock:
continue
try:
LOG.info('remove temporary directory: %s', tmp_path)
shutil.rmtree(tmp_path)
finally:
tmp_lock.release()
tmp_lock.close()
def _cleanup_trees(expiration):
LOG.info('remove images before: %s', expiration)
for image_dir_path in get_trees_path().iterdir():
if image_dir_path.is_dir():
if _get_last_updated(image_dir_path) < expiration:
_maybe_remove_image_dir(image_dir_path)
else:
LOG.info('remove unknown file under trees: %s', image_dir_path)
image_dir_path.unlink()
def _cleanup_tags():
for tag_path in _get_tags_path().iterdir():
if tag_path.is_symlink():
if not tag_path.resolve().exists():
LOG.info('remove dangling tag: %s', tag_path)
tag_path.unlink()
else:
LOG.info('remove unknown file under tags: %s', tag_path)
g1.files.remove(tag_path)
#
# Image builder.
#
def build_image(metadata, make_rootfs, output_path):
ASSERT.not_predicate(output_path, g1.files.lexists)
with tempfile.TemporaryDirectory(
dir=output_path.parent,
prefix=output_path.name + '-',
) as temp_output_dir_path:
temp_output_dir_path = Path(temp_output_dir_path)
_write_metadata(metadata, temp_output_dir_path)
make_rootfs(get_rootfs_path(temp_output_dir_path))
_setup_image_dir(temp_output_dir_path)
scripts.run([
'tar',
'--create',
*('--file', output_path),
'--gzip',
*('--directory', temp_output_dir_path),
_METADATA,
_ROOTFS,
])
#
# Image extraction.
#
def _extract_image(archive_path, dst_dir_path):
# We assume archive is always gzip-compressed for now.
hasher = hashlib.sha256()
# If we are running as root, we can and should preserve the
# original owners and permissions.
i_am_root = oses.has_root_privilege()
# TODO: Should we use stdlib's tarfile rather than calling tar?
with scripts.using_stdin(subprocess.PIPE), scripts.popen([
'tar',
'--extract',
*('--file', '-'),
*('--directory', dst_dir_path),
*(('--same-owner', '--same-permissions') if i_am_root else ()),
]) as proc:
try:
with gzip.open(archive_path, 'rb') as archive:
while True:
data = archive.read(4096)
if not data:
break
proc.stdin.write(data)
hasher.update(data)
except:
proc.kill()
raise
else:
proc.stdin.close()
proc.wait()
ASSERT.equal(proc.poll(), 0)
return hasher.hexdigest()
def _setup_image_dir(image_dir_path):
bases.setup_file(image_dir_path, 0o750, bases.chown_app)
bases.setup_file(
_get_metadata_path(image_dir_path), 0o640, bases.chown_app
)
bases.setup_file(get_rootfs_path(image_dir_path), 0o755, bases.chown_root)
#
# Image directories.
#
def _maybe_import_image_dir(src_path, image_id):
image_dir_path = get_image_dir_path(image_id)
if image_dir_path.exists():
LOG.warning('not import duplicated image: %s', image_id)
return False
else:
_assert_unique_name_and_version(read_metadata(src_path))
src_path.rename(image_dir_path)
return True
def _assert_unique_name_and_version(new_metadata):
for image_dir_path, metadata in _iter_metadatas():
ASSERT(
new_metadata.name != metadata.name
or new_metadata.version != metadata.version,
'expect unique image name and version: {}, {}',
image_dir_path,
new_metadata,
)
def _iter_image_dir_paths():
for image_dir_path in get_trees_path().iterdir():
if not image_dir_path.is_dir():
LOG.debug('encounter unknown file under trees: %s', image_dir_path)
else:
yield image_dir_path
def _find_image_dir_path(image_id, name, version, tag):
"""Return path to image directory or None if not found."""
ASSERT.only_one((image_id, name or version, tag))
ASSERT.not_xor(name, version)
if name:
        # We check for duplicate image names and versions when images are
        # imported, so we do not need to check again here.
for image_dir_path in _iter_image_dir_paths():
metadata = read_metadata(image_dir_path)
if metadata.name == name and metadata.version == version:
return image_dir_path
return None
if image_id:
image_dir_path = get_image_dir_path(image_id)
else:
tag_path = _get_tag_path(tag)
if not g1.files.lexists(tag_path):
return None
image_dir_path = _get_image_dir_path_from_tag(tag_path)
return image_dir_path if image_dir_path.is_dir() else None
def find_id(*, name=None, version=None, tag=None):
image_dir_path = _find_image_dir_path(None, name, version, tag)
return _get_id(image_dir_path) if image_dir_path else None
def find_name_and_version(*, image_id=None, tag=None):
image_dir_path = _find_image_dir_path(image_id, None, None, tag)
if image_dir_path is None:
return None, None
else:
metadata = read_metadata(image_dir_path)
return metadata.name, metadata.version
def _maybe_remove_image_dir(image_dir_path):
if _get_ref_count(image_dir_path) <= 1:
LOG.info('remove image directory: %s', image_dir_path)
for tag_path in _find_tag_paths(image_dir_path):
tag_path.unlink()
if image_dir_path.exists():
shutil.rmtree(image_dir_path)
return True
else:
LOG.warning('not remove active image directory: %s', image_dir_path)
return False
#
# Metadata.
#
def _iter_metadatas():
"""Iterate over metadata of every image."""
for image_dir_path in _iter_image_dir_paths():
yield image_dir_path, read_metadata(image_dir_path)
def read_metadata(image_dir_path):
"""Read image metadata from an image directory."""
return jsons.load_dataobject(
ImageMetadata, _get_metadata_path(image_dir_path)
)
def _write_metadata(metadata, image_dir_path):
jsons.dump_dataobject(metadata, _get_metadata_path(image_dir_path))
def add_ref(image_id, dst_path):
os.link(
ASSERT.predicate(
_get_metadata_path(get_image_dir_path(image_id)), Path.is_file
),
dst_path,
)
def _get_ref_count(image_dir_path):
try:
return _get_metadata_path(image_dir_path).stat().st_nlink
except FileNotFoundError:
return 0
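# How the reference count works: add_ref() hard-links an image's metadata file to
# a caller-owned path, so the metadata file's st_nlink doubles as the reference
# count. A freshly imported image has st_nlink == 1; each add_ref() adds one, and
# _maybe_remove_image_dir() only removes the directory while the count is <= 1.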
def touch(image_id):
_touch_image_dir(get_image_dir_path(image_id))
def _touch_image_dir(image_dir_path):
ASSERT.predicate(_get_metadata_path(image_dir_path), Path.is_file).touch()
def _get_last_updated(image_dir_path):
return datetimes.utcfromtimestamp(
_get_metadata_path(image_dir_path).stat().st_mtime
)
#
# Tags.
#
def _get_image_dir_path_from_tag(tag_path):
return ASSERT.predicate(tag_path, Path.is_symlink).resolve()
def _find_tags(image_id):
return sorted(map(_get_tag, _find_tag_paths(get_image_dir_path(image_id))))
def _find_tag_paths(image_dir_path):
for tag_path in _get_tags_path().iterdir():
if not tag_path.is_symlink():
LOG.debug('encounter unknown file under tags: %s', tag_path)
elif tag_path.resolve().name == image_dir_path.name:
yield tag_path
def _tag_image(tag, image_dir_path):
tag_path = _get_tag_path(tag)
# ".tmp" is not a validate tag, and so it will not conflict.
new_tag_path = tag_path.with_suffix('.tmp')
new_tag_path.symlink_to(_get_tag_target(image_dir_path))
new_tag_path.replace(tag_path)
|
|
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""dbutil.py - PostgreSQL database utilities for testing.
This module provides easy access to the test database server, and
provides a way to create, load, save and drop databases from this server.
It also provides wrapper classes for psycopg2 database connections that
implement utility queries as methods.
"""
from __future__ import absolute_import
import os
import sys
import logging
import random
import subprocess
import re
import psycopg2
#
# Root directory for test resources.
#
TEST_RESOURCES_ROOT = '/g/data1/v10/test_resources'
#
# Setup information for the test server. This might be better off loaded
# from a config file, but this will do for now. The password is kept
# in a .pgpass file to avoid saving it in versioned files. This is likely
# a better solution than recording the password either here or in a config
# file.
#
TESTSERVER_PARAMS = {
'name': 'test_server',
'host': '130.56.244.226',
'port': '6432',
'user': 'cube_tester',
'superuser': 'cube_admin'
}
#
# Database connection constants. These would be better off being defaults
# for items that can be overridden by a configuration file.
#
CONNECT_TIMEOUT = 60
MAINTENANCE_DB = 'postgres'
TEMPLATE_DB = 'template0'
USE_PGBOUNCER = True
PGBOUNCER_DB = 'pgbouncer'
#
# Random string constants. These set the parameters for random strings
# appended to database names by the random_name utility function. The intent
# is to make temporary database names (most likely) unique to avoid clashes.
# The current format is 9 decimal digits.
#
RANDOM_STR_MIN = 1
RANDOM_STR_MAX = 999999999
RANDOM_STR_FORMAT = "%09d"
#
# Server class
#
class Server(object):
"""Abstraction of a database server.
Gathers all the parameters that describe a server or how to work
with it, and provides services that use this information."""
def __init__(self, params):
self.name = params['name']
self.host = params['host']
self.port = params['port']
self.user = params['user']
self.superuser = params['superuser']
def connect(self, dbname, superuser=False, autocommit=True):
"""Create a pscopg2 connection to a database and return it.
dbname: The database to connect to.
superuser: Set to True to connect as the superuser, otherwise
connect as the user.
autocommit: Set to False to turn off autocommit, otherwise
autocommit will be turned on."""
user = (self.superuser if superuser else self.user)
dsn = ("dbname=%s host=%s port=%s user=%s connect_timeout=%s" %
(dbname, self.host, self.port, user, CONNECT_TIMEOUT))
conn = psycopg2.connect(dsn)
conn.autocommit = autocommit
return conn
def exists(self, dbname):
"""Returns True if the named database exists on the server."""
maint_conn = MaintenanceWrapper(
self.connect(MAINTENANCE_DB, superuser=True))
try:
result = maint_conn.exists(dbname)
finally:
maint_conn.close()
return result
def dblist(self):
"""Returns a list of the databases on the server."""
maint_conn = MaintenanceWrapper(
self.connect(MAINTENANCE_DB, superuser=True))
try:
result = maint_conn.dblist()
finally:
maint_conn.close()
return result
def load(self, dbname, save_dir, save_file):
"""Load the contents of a database from a file.
The database should be empty, and based off template0 or
equivalent. This method calls the psql command to do the load."""
save_path = os.path.join(save_dir, save_file)
load_cmd = ["psql",
"--dbname=%s" % dbname,
"--username=%s" % self.superuser,
"--host=%s" % self.host,
"--port=%s" % self.port,
"--file=%s" % save_path]
try:
subprocess.check_output(load_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
# Make sure error output is in the error message.
message = ("%s: problem calling %s:\n%s" %
(__name__, err.cmd[0], err.output))
            message += "\nCommand: " + " ".join(load_cmd)
raise AssertionError(message)
def save(self, dbname, save_dir, save_file, table=None):
"""Save the contents of a database to a file.
This method calls the pg_dump command to do the save. This
dump is in sql script format so use psql to reload."""
save_path = os.path.join(save_dir, save_file)
save_cmd = ["pg_dump",
"--dbname=%s" % dbname,
"--username=%s" % self.superuser,
"--host=%s" % self.host,
"--port=%s" % self.port,
"--file=%s" % save_path]
if table:
save_cmd.append("--table=%s" % table)
try:
subprocess.check_output(save_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            # Make sure error output is in the error message.
message = ("%s: problem calling %s:\n%s" %
(__name__, err.cmd[0], err.output))
raise AssertionError(message)
def copy_table_between_databases(self, dbname1, dbname2, table_name):
"""Copy a table from one database to another on the same server.
This method pipes the output of pg_dump to psql."""
dump_cmd = ["pg_dump",
"--dbname=%s" % dbname1,
"--username=%s" % self.superuser,
"--host=%s" % self.host,
"--port=%s" % self.port,
"--table=%s" % table_name]
load_cmd = ["psql",
"--dbname=%s" % dbname2,
"--username=%s" % self.superuser,
"--host=%s" % self.host,
"--port=%s" % self.port
]
try:
ps_dump = subprocess.Popen(dump_cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
subprocess.check_output(load_cmd,
stdin=ps_dump.stdout,
stderr=subprocess.STDOUT)
ps_dump.wait()
except subprocess.CalledProcessError as err:
            # Make sure error output is in the error message.
message = ("%s: problem calling %s:\n%s" %
(__name__, err.cmd[0], err.output))
raise AssertionError(message)
def drop(self, dbname):
"""Drop the named database.
Connections are closed explicitly with try/finally blocks,
since they do not seem to be closed automatically in the
case of exceptions and this causes problems.
If pgbouncer is in use a pgbouncer pause command needs to
be issued before dropping the database. This will wait
until active transactions are complete."""
maint_conn = MaintenanceWrapper(
self.connect(MAINTENANCE_DB, superuser=True))
try:
if maint_conn.exists(dbname):
if USE_PGBOUNCER:
bouncer_conn = BouncerWrapper(
self.connect(PGBOUNCER_DB, superuser=True))
try:
bouncer_conn.pause(dbname)
maint_conn.drop(dbname)
finally:
bouncer_conn.close()
else:
maint_conn.drop(dbname)
finally:
maint_conn.close()
def create(self, dbname, save_dir=None, save_file=None,
template_db=TEMPLATE_DB):
"""Creates and loads a database from a file.
This method does a clean create and load of the named database
from the file 'savefile'. It drops an old database of the same
        name if necessary.
It uses template_db as the template database, which is copied
to create the new database.
If save_dir or save_file are None (or not specified), no
save file is loaded.
Connections are closed explicitly with try/finally blocks,
since they do not seem to be closed automatically in the
case of exceptions and this causes problems.
If pgbouncer is in use a pgbouncer pause command needs to
be issued before dropping the database. This will wait
until active transactions are complete. The pgbouncer
resume command is issued once the database is (re)created.
This is needed to prevent connection attempts to the new database
from hanging or returning errors if pgbouncer had pools set
up on the old database."""
maint_conn = MaintenanceWrapper(
self.connect(MAINTENANCE_DB, superuser=True))
try:
# Create the database, dropping it first if needed.
if USE_PGBOUNCER:
bouncer_conn = BouncerWrapper(
self.connect(PGBOUNCER_DB, superuser=True))
try:
if maint_conn.exists(dbname):
bouncer_conn.pause(dbname)
maint_conn.drop(dbname)
# To be used as a template, template_db must have
# no current connections.
bouncer_conn.kill(template_db)
maint_conn.create(dbname, template_db)
bouncer_conn.resume(dbname)
finally:
bouncer_conn.close()
else:
if maint_conn.exists(dbname):
maint_conn.drop(dbname)
maint_conn.create(dbname, template_db)
# Load the new database from the save file if necessary
            if save_file is not None and save_dir is not None:
self.load(dbname, save_dir, save_file)
# Run ANALYSE on the newly loaded database
db_conn = ConnectionWrapper(self.connect(dbname, superuser=True))
try:
db_conn.analyse()
finally:
db_conn.close()
# All done
finally:
maint_conn.close()
#
# Connection wrappers.
#
class ConnectionWrapper(object):
"""Generic connection wrapper, inherited by the specific wrappers.
This is a wrapper for a psycopg2 database connection. It
passes on unknown attribute references to the wrapped connection
using __getattr__. The specific wrappers that inherit from this
implement queries and operations on the connection (self.conn)
as methods.
Some utility methods are implemented here. database_name is
useful for testing and error messages. analyse is used after
a database has been created."""
def __init__(self, conn):
self.conn = conn
def database_name(self):
"""Returns the name of the connected database."""
sql = ("SELECT catalog_name\n" +
"FROM information_schema.information_schema_catalog_name;")
with self.conn.cursor() as curs:
curs.execute(sql)
dbname = curs.fetchone()[0]
return dbname
def analyse(self):
"""Runs the ANALYSE command on the connected database."""
with self.conn.cursor() as curs:
curs.execute("ANALYSE;")
def __getattr__(self, attrname):
"""Delegate unknown attributes to the psycopg2 connection."""
return getattr(self.conn, attrname)
class MaintenanceWrapper(ConnectionWrapper):
"""Wrapper for a connection intented for maintenance commands."""
def exists(self, dbname):
"""Returns True if the named database exists."""
exists_sql = ("SELECT datname FROM pg_database\n" +
"WHERE datname = %(dbname)s;")
with self.conn.cursor() as curs:
curs.execute(exists_sql, {'dbname': dbname})
db_found = bool(curs.fetchone())
return db_found
def dblist(self):
"""Returns a list of the databases on the server."""
dblist_sql = "SELECT datname FROM pg_database;"
with self.conn.cursor() as curs:
curs.execute(dblist_sql)
result = [tup[0] for tup in curs.fetchall()]
return result
def drop(self, dbname):
"""Drops the named database."""
drop_sql = "DROP DATABASE %s;" % safe_name(dbname)
with self.conn.cursor() as curs:
curs.execute(drop_sql)
def create(self, dbname, template_db=TEMPLATE_DB):
"""Creates the named database."""
create_sql = ("CREATE DATABASE %s\n" % safe_name(dbname) +
"TEMPLATE %s;" % template_db)
with self.conn.cursor() as curs:
curs.execute(create_sql)
class BouncerWrapper(ConnectionWrapper):
"""Wrapper for a connection to the pgbouncer console pseudo-database.
Obviously these commands will not work if connected to an ordinary
database.
These commands will ignore errors since pgbouncer may
not know about the database the operations are being done on, but
the commands have to be run anyway in case it does."""
def pause(self, dbname):
"""Tells pgbouncer to pause the named database.
This should cause pgbouncer to disconnect from dbname, first
waiting for any queries to complete. This allows the database
to be dropped.
"""
pause_sql = "PAUSE %s;" % safe_name(dbname)
with self.conn.cursor() as cur:
try:
cur.execute(pause_sql)
except psycopg2.DatabaseError:
pass
def kill(self, dbname):
"""Tells pgbouncer to kill its connections to the named database.
This should cause pgbouncer to disconnect from dbname without waiting
for any queries to complete.
"""
kill_sql = "KILL %s;" % safe_name(dbname)
with self.conn.cursor() as cur:
try:
cur.execute(kill_sql)
except psycopg2.DatabaseError:
pass
def resume(self, dbname):
"""Tells pgbouncer to resume work on the named database.
If this is not called and the database was previously
paused then connection attempts will hang or give errors."""
resume_sql = "RESUME %s;" % safe_name(dbname)
with self.conn.cursor() as cur:
try:
cur.execute(resume_sql)
except psycopg2.DatabaseError:
pass
#
# Utility functions
#
def random_name(basename=""):
"""Returns a database name with a 9 digit random number appended."""
random_str = (RANDOM_STR_FORMAT %
random.randint(RANDOM_STR_MIN, RANDOM_STR_MAX))
return basename + "_" + random_str
def safe_name(dbname):
"""Returns a database name with non letter, digit, _ characters removed."""
char_list = [c for c in dbname if c.isalnum() or c == '_']
return "".join(char_list)
def resources_directory(*names):
"""Returns the path to a test resources directory, creating it if needed.
The path of the directory is TEST_RESOURCES_ROOT/name1/name2/...
where name1, name2, ... are the names passed in as parameters.
"""
test_dir = os.path.join(TEST_RESOURCES_ROOT, *names)
if not os.path.isdir(test_dir):
# Allow group permissions on the directory we are about to create
old_umask = os.umask(0o007)
# Make the directories
os.makedirs(test_dir)
# Put back the old umask
os.umask(old_umask)
return test_dir
def version_or_user(version=None, user=None):
"""Returns the version or user for a test resources directory.
Returns the version string, unless version is 'user', in which case
the user string is returned instead. Defaults are described below.
version: The version of the datacube code. This is expected to be either
'develop', 'user', or a version number. If not given it is taken
from the DATACUBE_VERSION environment variable. If the DATACUBE_VERSION
variable is not defined it is taken to be 'user'.
user: The user name. This is used in place of version if version is 'user'.
If this is not defined it is taken from the USER environment variable.
"""
if not version:
# Using 'not version' rather than 'version is None' here because
# "" is NOT a valid version.
version = os.environ.get('DATACUBE_VERSION', 'user')
if version == 'user':
if not user:
# Using 'not user' rather than 'user is None' here because
# "" is NOT a valid user.
user = os.environ['USER']
return user
else:
return version
def input_directory(module, suite, version=None, user=None):
"""Returns a path to a test input directory, creating it if needed.
The path of the directory is
TEST_RESOURCES_ROOT/version/input/module/suite/. If the version is
'user' then the user argument takes the place of version in the path.
module: The name of the module being tested, eg 'dbcompare'.
    suite: The name of the test suite or test class containing the test,
eg 'TestReporter'.
version: The version of the datacube code. This is expected to be either
'develop', 'user', or a version number. If not given it is taken
from the DATACUBE_VERSION environment variable. If the DATACUBE_VERSION
variable is not defined it is taken to be 'user'.
user: The user name. This is used in place of version if version is 'user'.
If this is not defined it is taken from the USER environment variable.
The 'input' directory is for input or setup files for tests. The
files are expected to be named after the test that uses them.
"""
version = version_or_user(version, user)
return resources_directory(version, 'input', module, suite)
def output_directory(module, suite, user=None):
"""Returns the path to a test output directory, creating it if needed.
    The path of the directory is TEST_RESOURCES_ROOT/user/output/module/suite/.
If user is not given, the environment variable USER is used as the
name of the user.
module: the name of the module being tested, eg 'dbcompare'
    suite: the name of the test suite or test class containing the test,
eg 'TestReporter'
The 'output' directory is for the output of the tests. The files are
expected to be named after the test that produces them.
"""
version = version_or_user(version='user', user=user)
return resources_directory(version, 'output', module, suite)
def expected_directory(module, suite, version=None, user=None):
"""Returns a path to a test expected directory, creating it if needed.
The path of the directory is
TEST_RESOURCES_ROOT/version/expected/module/suite/. If the version is
'user' then the user argument takes the place of version in the path.
module: The name of the module being tested, eg 'dbcompare'.
    suite: The name of the test suite or test class containing the test,
eg 'TestReporter'.
version: The version of the datacube code. This is expected to be either
'develop', 'user', or a version number. If not given it is taken
from the DATACUBE_VERSION environment variable. If the DATACUBE_VERSION
variable is not defined it is taken to be 'user'.
user: The user name. This is used in place of version if version is 'user'.
If this is not defined it is taken from the USER environment variable.
The 'expected' directory is for the expected output of the tests. The
files are expected to be named after the test that produces them. These
files are used to automate the tests by comparing output produced against
expected output.
"""
version = version_or_user(version, user)
return resources_directory(version, 'expected', module, suite)
def temp_directory(module, suite, test_dir, version=None, user=None):
"""Returns a path to a temp subdirectory, creating it if needed."""
version = version_or_user(version, user)
return resources_directory(version, test_dir, module, suite, 'temp')
def tile_root_directory(module, suite, test_dir, version=None, user=None):
"""Returns a path to a tile_root subdirectory, creating it if needed."""
version = version_or_user(version, user)
return resources_directory(version, test_dir, module, suite, 'tile_root')
def update_config_file(dbname, input_dir, output_dir, config_file_name,
output_file_name=None):
"""Creates a temporary agdc_default.config file by updating the database name.
This function returns the path to the updated config file.
dbname: the name of the database to connect to.
input_dir: the directory containing the config file template.
output_dir: the directory in which the updated config file will be written.
config_file_name: the name of the config file (template and updated).
output_file_name: the name of the updated config file - if this is not
specified, it is taken to be the same as the config_file_name.
"""
return update_config_file2({'dbname': dbname}, input_dir, output_dir,
config_file_name, output_file_name)
def update_config_file2(parameter_values_dict, input_dir, output_dir,
config_file_name, output_file_name=None):
"""Creates a temporary agdc_default.config file by updating those attributes
    according to the parameter_values_dict dictionary.
This function returns the path to the updated config file.
parameter_values_dict: a dictionary of parameter-values to be inserted
into the template config file
input_dir: the directory containing the config file template.
output_dir: the directory in which the updated config file will be written.
config_file_name: the name of the config template file.
output_file_name: the name of the updated config file - if this is not
specified, it is taken to be the same as the config_file_name.
"""
template_path = os.path.join(input_dir, config_file_name)
if output_file_name:
update_path = os.path.join(output_dir, output_file_name)
else:
update_path = os.path.join(output_dir, config_file_name)
with open(template_path) as template:
template_str = template.read()
update_str = template_str
for param, value in parameter_values_dict.items():
update_str = re.sub(r'^\s*%s\s*=[^\n\r]*(\r?)$' % param,
r'%s = %s\1' % (param, value),
update_str, flags=re.MULTILINE)
with open(update_path, 'w') as update:
update.write(update_str)
return update_path
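# Example of the substitution performed by update_config_file2 (illustrative
# values): given a template line
#     dbname = old_database
# calling update_config_file2({'dbname': 'test_db_042731963'}, ...) rewrites it as
#     dbname = test_db_042731963
# leaving every other line of the config file unchanged.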
def create_logger(name, logfile_path=None):
"""Creates a logger object in the datacube style.
This sets up a logger with handler, formatter, and level defined
as is usual for the datacube scripts. 'name' is the name of the
logger, __name__ (the current module) is a typical value.
If 'logfile_path' is set it is taken as the name of a log file,
which is opened in write mode and used to create the logger.
Otherwise sys.stdout is used."""
if logfile_path:
console_handler = logging.FileHandler(logfile_path, mode='w')
else:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
logger = logging.getLogger(name)
if logger.level == logging.NOTSET:
logger.setLevel(logging.DEBUG) # Default logging level for all modules
logger.addHandler(console_handler)
return logger
#
# Test server instance:
#
TESTSERVER = Server(TESTSERVER_PARAMS)
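# Typical usage sketch (hedged: the save file and module/suite names below are
# illustrative assumptions, not part of this module):
#
#     dbname = random_name('test_dbutil')
#     TESTSERVER.create(dbname,
#                       input_directory('dbutil', 'TestServer'),
#                       'test_db.sql')
#     try:
#         conn = ConnectionWrapper(TESTSERVER.connect(dbname))
#         assert conn.database_name() == dbname
#         conn.close()
#     finally:
#         TESTSERVER.drop(dbname)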
|
|
import ZSI
import ZSI.TCcompound
from ZSI.schema import LocalElementDeclaration, ElementDeclaration, TypeDefinition, GTD, GED
################################################
# targetNamespace
# https://www.eway.com.au/gateway/managedpayment
################################################
class Eway:
targetNamespace = "https://www.eway.com.au/gateway/managedpayment"
class EwayHeader(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "eWAYHeader")
def __init__(self, pname=None, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
            ns = Eway.EwayHeader.schema
TClist = [ZSI.TC.String(pname=(ns,"eWAYCustomerID"), aname="customer_id", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Username"), aname="username", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Password"), aname="password", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
else:
# attribute handling code
self.attribute_typecode_dict[("http://www.w3.org/2001/XMLSchema","anyAttribute")] = ZSI.TC.AnyElement()
if not pname:
pname = ("https://www.eway.com.au/gateway/managedpayment","eWAYHeader")
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self._eWAYCustomerID = None
self._Username = None
self._Password = None
return
Holder.__name__ = "eWAYHeader_Holder"
self.pyclass = Holder
class CreditCard(TypeDefinition):
#complexType/complexContent extension
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "CreditCard")
def __init__(self, pname, ofwhat=(), extend=False, restrict=False, attributes=None, **kw):
ns = Eway.CreditCard.schema
TClist = [ZSI.TC.String(pname=(ns,"CCName"), aname="name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNumber"), aname="number", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCExpiryMonth"), aname="expiry_month", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCExpiryYear"), aname="expiry_year", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
attributes = self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
if Eway.ManagedCustomer not in Eway.CreditCard.__bases__:
bases = list(Eway.CreditCard.__bases__)
bases.insert(0, Eway.ManagedCustomer)
Eway.CreditCard.__bases__ = tuple(bases)
Eway.ManagedCustomer.__init__(self, pname, ofwhat=TClist, extend=True, attributes=attributes, **kw)
class ManagedCustomer(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "ManagedCustomer")
def __init__(self, pname, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
ns = Eway.ManagedCustomer.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"ManagedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerRef"), aname="reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerTitle"), aname="title", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerFirstName"), aname="first_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerLastName"), aname="last_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerCompany"), aname="company", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerJobDesc"), aname="job_description", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerEmail"), aname="email", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerAddress"), aname="address", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerSuburb"), aname="suburb", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerState"), aname="state", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerPostCode"), aname="postcode", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerCountry"), aname="country", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerPhone1"), aname="phone1", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerPhone2"), aname="phone2", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerFax"), aname="fax", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerURL"), aname="url", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerComments"), aname="comments", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
self.reference = None
self.title = None
self.first_name = None
self.last_name = None
self.company = None
self.job_description = None
self.email = None
self.address = None
self.suburb = None
self.state = None
self.postcode = None
self.country = None
self.phone1 = None
self.phone2 = None
self.fax = None
self.url = None
self.comments = None
return
Holder.__name__ = "ManagedCustomer_Holder"
self.pyclass = Holder
class CCPaymentResponse(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "CCPaymentResponse")
def __init__(self, pname, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
ns = Eway.CCPaymentResponse.schema
TClist = [ZSI.TC.String(pname=(ns,"ewayTrxnError"), aname="transaction_error", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ewayTrxnStatus"), aname="transaction_status", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ewayTrxnNumber"), aname="transaction_number", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ewayReturnAmount"), aname="return_amount", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ewayAuthCode"), aname="auth_code", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
                    self.transaction_error = None
self.transaction_status = None
self.transaction_number = None
self.return_amount = None
self.auth_code = None
return
Holder.__name__ = "CCPaymentResponse_Holder"
self.pyclass = Holder
class ArrayOfManagedTransaction(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "ArrayOfManagedTransaction")
def __init__(self, pname, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
ns = Eway.ArrayOfManagedTransaction.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","ManagedTransaction",lazy=False)(pname=(ns,"ManagedTransaction"), aname="managed_transaction", minOccurs=0, maxOccurs="unbounded", nillable=True, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.managed_transaction = []
return
Holder.__name__ = "ArrayOfManagedTransaction_Holder"
self.pyclass = Holder
class ManagedTransaction(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "ManagedTransaction")
def __init__(self, pname, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
ns = Eway.ManagedTransaction.schema
TClist = [ZSI.TCnumbers.Iint(pname=(ns,"TotalAmount"), aname="total_amount", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"Result"), aname="result", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ResponseText"), aname="response_text", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCtimes.gDateTime(pname=(ns,"TransactionDate"), aname="transaction_date", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"ewayTrxnNumber"), aname="transaction_number", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.total_amount = None
self.result = None
self.response_text = None
self.transaction_date = None
self.transaction_number = None
return
Holder.__name__ = "ManagedTransaction_Holder"
self.pyclass = Holder
class CreateCustomer(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "CreateCustomer"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.CreateCustomer.schema
TClist = [ZSI.TC.String(pname=(ns,"Title"), aname="title", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"FirstName"), aname="first_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"LastName"), aname="last_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Address"), aname="address", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Suburb"), aname="suburb", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"State"), aname="state", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Company"), aname="company", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"PostCode"), aname="postcode", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Country"), aname="country", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Email"), aname="email", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Fax"), aname="fax", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Phone"), aname="phone", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Mobile"), aname="mobile", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerRef"), aname="customer_reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"JobDesc"), aname="job_description", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Comments"), aname="comments", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"URL"), aname="url", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNumber"), aname="card_number", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNameOnCard"), aname="card_holder_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"CCExpiryMonth"), aname="card_expiry_month", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"CCExpiryYear"), aname="card_expiry_year", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","CreateCustomer")
kw["aname"] = "_CreateCustomer"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
self.title = None
self.first_name = None
self.last_name = None
self.address = None
self.suburb = None
self.state = None
self.company = None
self.postcode = None
                    self.country = None
self.email = None
self.fax = None
self.phone = None
self.mobile = None
self.customer_reference = None
self.job_description = None
self.comments = None
self.url = None
self.card_number = None
self.card_holder_name = None
self.card_expiry_month = None
self.card_expiry_year = None
return
Holder.__name__ = "CreateCustomer_Holder"
self.pyclass = Holder
class CreateCustomerResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "CreateCustomerResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.CreateCustomerResponse.schema
TClist = [ZSI.TC.String(pname=(ns,"CreateCustomerResult"), aname="create_customer_result", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","CreateCustomerResponse")
kw["aname"] = "_CreateCustomerResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
self.create_customer_result = None
return
Holder.__name__ = "CreateCustomerResponse_Holder"
self.pyclass = Holder
class eWAYHeader(ElementDeclaration):
literal = "eWAYHeader"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","eWAYHeader")
kw["aname"] = "_eWAYHeader"
            if Eway.EwayHeader not in Eway.eWAYHeader.__bases__:
                bases = list(Eway.eWAYHeader.__bases__)
                bases.insert(0, Eway.EwayHeader)
                Eway.eWAYHeader.__bases__ = tuple(bases)
            Eway.EwayHeader.__init__(self, **kw)
if self.pyclass is not None: self.pyclass.__name__ = "eWAYHeader_Holder"
class UpdateCustomer(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "UpdateCustomer"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.UpdateCustomer.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"managedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Title"), aname="title", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"FirstName"), aname="first_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"LastName"), aname="last_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Address"), aname="address", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Suburb"), aname="suburb", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"State"), aname="state", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Company"), aname="company", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"PostCode"), aname="postcode", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Country"), aname="country", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Email"), aname="email", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Fax"), aname="fax", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Phone"), aname="phone", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Mobile"), aname="mobile", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerRef"), aname="customer_reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"JobDesc"), aname="job_description", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Comments"), aname="comments", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"URL"), aname="url", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNumber"), aname="card_number", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNameOnCard"), aname="card_holder_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"CCExpiryMonth"), aname="card_expiry_month", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"CCExpiryYear"), aname="card_expiry_year", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","UpdateCustomer")
kw["aname"] = "_UpdateCustomer"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
self.title = None
self.first_name = None
self.last_name = None
self.address = None
self.suburb = None
self.state = None
self.company = None
self.postcode = None
self.country = None
self.email = None
self.fax = None
self.phone = None
self.mobile = None
self.customer_reference = None
self.job_description = None
self.comments = None
self.url = None
self.card_number = None
self.card_holder_name = None
self.card_expiry_month = None
self.card_expiry_year = None
return
Holder.__name__ = "UpdateCustomer_Holder"
self.pyclass = Holder
class UpdateCustomerResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "UpdateCustomerResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.UpdateCustomerResponse.schema
TClist = [ZSI.TC.Boolean(pname=(ns,"UpdateCustomerResult"), aname="update_customer_result", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","UpdateCustomerResponse")
kw["aname"] = "update_customer_response"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.update_customer_result = None
return
Holder.__name__ = "UpdateCustomerResponse_Holder"
self.pyclass = Holder
class QueryCustomer(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryCustomer"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryCustomer.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"managedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryCustomer")
kw["aname"] = "_QueryCustomer"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
return
Holder.__name__ = "QueryCustomer_Holder"
self.pyclass = Holder
class QueryCustomerResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryCustomerResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryCustomerResponse.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","CreditCard",lazy=False)(pname=(ns,"QueryCustomerResult"), aname="query_customer_result", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryCustomerResponse")
kw["aname"] = "_QueryCustomerResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.query_customer_result = None
return
Holder.__name__ = "QueryCustomerResponse_Holder"
self.pyclass = Holder
class QueryCustomerByReference(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryCustomerByReference"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryCustomerByReference.schema
TClist = [ZSI.TC.String(pname=(ns,"CustomerReference"), aname="customer_reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryCustomerByReference")
kw["aname"] = "_QueryCustomerByReference"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.customer_reference = None
return
Holder.__name__ = "QueryCustomerByReference_Holder"
self.pyclass = Holder
class QueryCustomerByReferenceResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryCustomerByReferenceResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryCustomerByReferenceResponse.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","CreditCard",lazy=False)(pname=(ns,"QueryCustomerByReferenceResult"), aname="query_customer_by_reference_result", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryCustomerByReferenceResponse")
kw["aname"] = "_QueryCustomerByReferenceResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.query_customer_by_reference_result = None
return
Holder.__name__ = "QueryCustomerByReferenceResponse_Holder"
self.pyclass = Holder
class ProcessPayment(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "ProcessPayment"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.ProcessPayment.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"managedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"amount"), aname="amount", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"invoiceReference"), aname="invoice_reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"invoiceDescription"), aname="invoice_description", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","ProcessPayment")
kw["aname"] = "_ProcessPayment"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
self.amount = None
self.invoice_reference = None
self.invoice_description = None
return
Holder.__name__ = "ProcessPayment_Holder"
self.pyclass = Holder
class ProcessPaymentResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "ProcessPaymentResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.ProcessPaymentResponse.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","CCPaymentResponse",lazy=False)(pname=(ns,"ewayResponse"), aname="response", minOccurs=1, maxOccurs=1, nillable=True, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","ProcessPaymentResponse")
kw["aname"] = "_ProcessPaymentResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.response = None
return
Holder.__name__ = "ProcessPaymentResponse_Holder"
self.pyclass = Holder
class QueryPayment(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryPayment"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryPayment.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"managedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryPayment")
kw["aname"] = "_QueryPayment"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
return
Holder.__name__ = "QueryPayment_Holder"
self.pyclass = Holder
class QueryPaymentResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryPaymentResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryPaymentResponse.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","ArrayOfManagedTransaction",lazy=False)(pname=(ns,"QueryPaymentResult"), aname="query_payment_result", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryPaymentResponse")
kw["aname"] = "_QueryPaymentResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.query_payment_result = None
return
Holder.__name__ = "QueryPaymentResponse_Holder"
self.pyclass = Holder
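# Minimal usage sketch for the typecodes above (hedged: the customer id and amount
# are illustrative, and serialization assumes ZSI's usual SoapWriter API):
#
#     typecode = Eway.ProcessPayment()
#     request = typecode.pyclass()       # a ProcessPayment_Holder instance
#     request.id = 9876543211000         # managedCustomerID
#     request.amount = 1000              # amount in cents
#     request.invoice_reference = 'INV-001'
#     request.invoice_description = 'Test payment'
#     writer = ZSI.SoapWriter()
#     writer.serialize(request, typecode)
#     soap_envelope = str(writer)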
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.dataflow_v1beta3.services.jobs_v1_beta3 import pagers
from google.cloud.dataflow_v1beta3.types import environment
from google.cloud.dataflow_v1beta3.types import jobs
from google.cloud.dataflow_v1beta3.types import snapshots
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import JobsV1Beta3Transport, DEFAULT_CLIENT_INFO
from .transports.grpc import JobsV1Beta3GrpcTransport
from .transports.grpc_asyncio import JobsV1Beta3GrpcAsyncIOTransport
class JobsV1Beta3ClientMeta(type):
"""Metaclass for the JobsV1Beta3 client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[JobsV1Beta3Transport]]
_transport_registry["grpc"] = JobsV1Beta3GrpcTransport
_transport_registry["grpc_asyncio"] = JobsV1Beta3GrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[JobsV1Beta3Transport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class JobsV1Beta3Client(metaclass=JobsV1Beta3ClientMeta):
"""Provides a method to create and modify Google Cloud Dataflow
jobs. A Job is a multi-stage computation graph run by the Cloud
Dataflow service.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dataflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobsV1Beta3Client: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobsV1Beta3Client: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
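    # Usage sketch (the key path is illustrative):
    #     client = JobsV1Beta3Client.from_service_account_file("path/to/key.json")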
@property
def transport(self) -> JobsV1Beta3Transport:
"""Returns the transport used by the client instance.
Returns:
JobsV1Beta3Transport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, JobsV1Beta3Transport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the jobs v1 beta3 client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, JobsV1Beta3Transport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, JobsV1Beta3Transport):
# transport is a JobsV1Beta3Transport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def create_job(
self,
request: Union[jobs.CreateJobRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> jobs.Job:
r"""Creates a Cloud Dataflow job.
To create a job, we recommend using
``projects.locations.jobs.create`` with a [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints).
Using ``projects.jobs.create`` is not recommended, as your job
will always start in ``us-central1``.
.. code-block:: python
from google.cloud import dataflow_v1beta3
def sample_create_job():
# Create a client
client = dataflow_v1beta3.JobsV1Beta3Client()
# Initialize request argument(s)
request = dataflow_v1beta3.CreateJobRequest(
)
# Make the request
response = client.create_job(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataflow_v1beta3.types.CreateJobRequest, dict]):
The request object. Request to create a Cloud Dataflow
job.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataflow_v1beta3.types.Job:
Defines a job to be run by the Cloud
Dataflow service.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a jobs.CreateJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, jobs.CreateJobRequest):
request = jobs.CreateJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_job]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_job(
self,
request: Union[jobs.GetJobRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> jobs.Job:
r"""Gets the state of the specified Cloud Dataflow job.
To get the state of a job, we recommend using
``projects.locations.jobs.get`` with a [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints).
Using ``projects.jobs.get`` is not recommended, as you can only
get the state of jobs that are running in ``us-central1``.
.. code-block:: python
from google.cloud import dataflow_v1beta3
def sample_get_job():
# Create a client
client = dataflow_v1beta3.JobsV1Beta3Client()
# Initialize request argument(s)
request = dataflow_v1beta3.GetJobRequest(
)
# Make the request
response = client.get_job(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataflow_v1beta3.types.GetJobRequest, dict]):
The request object. Request to get the state of a Cloud
Dataflow job.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataflow_v1beta3.types.Job:
Defines a job to be run by the Cloud
Dataflow service.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a jobs.GetJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, jobs.GetJobRequest):
request = jobs.GetJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_job]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_job(
self,
request: Union[jobs.UpdateJobRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> jobs.Job:
r"""Updates the state of an existing Cloud Dataflow job.
To update the state of an existing job, we recommend using
``projects.locations.jobs.update`` with a [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints).
Using ``projects.jobs.update`` is not recommended, as you can
only update the state of jobs that are running in
``us-central1``.
.. code-block:: python
from google.cloud import dataflow_v1beta3
def sample_update_job():
# Create a client
client = dataflow_v1beta3.JobsV1Beta3Client()
# Initialize request argument(s)
request = dataflow_v1beta3.UpdateJobRequest(
)
# Make the request
response = client.update_job(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataflow_v1beta3.types.UpdateJobRequest, dict]):
The request object. Request to update a Cloud Dataflow
job.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataflow_v1beta3.types.Job:
Defines a job to be run by the Cloud
Dataflow service.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a jobs.UpdateJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, jobs.UpdateJobRequest):
request = jobs.UpdateJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_job]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_jobs(
self,
request: Union[jobs.ListJobsRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobsPager:
r"""List the jobs of a project.
To list the jobs of a project in a region, we recommend using
``projects.locations.jobs.list`` with a [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints).
To list all jobs across all regions, use
``projects.jobs.aggregated``. Using ``projects.jobs.list`` is
not recommended, as you can only get the list of jobs that are
running in ``us-central1``.
.. code-block:: python
from google.cloud import dataflow_v1beta3
def sample_list_jobs():
# Create a client
client = dataflow_v1beta3.JobsV1Beta3Client()
# Initialize request argument(s)
request = dataflow_v1beta3.ListJobsRequest(
)
# Make the request
page_result = client.list_jobs(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.dataflow_v1beta3.types.ListJobsRequest, dict]):
The request object. Request to list Cloud Dataflow jobs.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataflow_v1beta3.services.jobs_v1_beta3.pagers.ListJobsPager:
Response to a request to list Cloud
Dataflow jobs in a project. This might
be a partial response, depending on the
page size in the ListJobsRequest.
However, if the project does not have
any jobs, an instance of
ListJobsResponse is not returned and the
request's response body is empty {}.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a jobs.ListJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, jobs.ListJobsRequest):
request = jobs.ListJobsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_jobs]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def aggregated_list_jobs(
self,
request: Union[jobs.ListJobsRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.AggregatedListJobsPager:
r"""List the jobs of a project across all regions.
.. code-block:: python
from google.cloud import dataflow_v1beta3
def sample_aggregated_list_jobs():
# Create a client
client = dataflow_v1beta3.JobsV1Beta3Client()
# Initialize request argument(s)
request = dataflow_v1beta3.ListJobsRequest(
)
# Make the request
page_result = client.aggregated_list_jobs(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.dataflow_v1beta3.types.ListJobsRequest, dict]):
The request object. Request to list Cloud Dataflow jobs.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataflow_v1beta3.services.jobs_v1_beta3.pagers.AggregatedListJobsPager:
Response to a request to list Cloud
Dataflow jobs in a project. This might
be a partial response, depending on the
page size in the ListJobsRequest.
However, if the project does not have
any jobs, an instance of
ListJobsResponse is not returned and the
request's response body is empty {}.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a jobs.ListJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, jobs.ListJobsRequest):
request = jobs.ListJobsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.aggregated_list_jobs]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.AggregatedListJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def check_active_jobs(
self,
request: Union[jobs.CheckActiveJobsRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> jobs.CheckActiveJobsResponse:
r"""Check for existence of active jobs in the given
project across all regions.
.. code-block:: python
from google.cloud import dataflow_v1beta3
def sample_check_active_jobs():
# Create a client
client = dataflow_v1beta3.JobsV1Beta3Client()
# Initialize request argument(s)
request = dataflow_v1beta3.CheckActiveJobsRequest(
)
# Make the request
response = client.check_active_jobs(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataflow_v1beta3.types.CheckActiveJobsRequest, dict]):
The request object. Request to check if active jobs
exist for a project.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataflow_v1beta3.types.CheckActiveJobsResponse:
Response for CheckActiveJobsRequest.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a jobs.CheckActiveJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, jobs.CheckActiveJobsRequest):
request = jobs.CheckActiveJobsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.check_active_jobs]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def snapshot_job(
self,
request: Union[jobs.SnapshotJobRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> snapshots.Snapshot:
r"""Snapshot the state of a streaming job.
.. code-block:: python
from google.cloud import dataflow_v1beta3
def sample_snapshot_job():
# Create a client
client = dataflow_v1beta3.JobsV1Beta3Client()
# Initialize request argument(s)
request = dataflow_v1beta3.SnapshotJobRequest(
)
# Make the request
response = client.snapshot_job(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataflow_v1beta3.types.SnapshotJobRequest, dict]):
The request object. Request to create a snapshot of a
job.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataflow_v1beta3.types.Snapshot:
Represents a snapshot of a job.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a jobs.SnapshotJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, jobs.SnapshotJobRequest):
request = jobs.SnapshotJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.snapshot_job]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
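# Editor's note: because __enter__/__exit__ are defined above, the client can be
# used as a context manager so the transport is closed automatically, e.g.
#   with JobsV1Beta3Client() as client:
#       client.get_job(request=dataflow_v1beta3.GetJobRequest())
# (only do this when the transport is not shared with other clients).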
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dataflow-client",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("JobsV1Beta3Client",)
|
|
#-----------------------------------------
# code to recreate the spline garch model
# Example is drawn from the Oxmetrics 7.0
# G@RCH package using NASDAQ returns
#-----------------------------------------
#-------------------------------------------
# import necessary packages and functions
#-------------------------------------------
import numpy as np
import pandas as pd
from math import pi, log, isnan, isinf
from matplotlib import pyplot as plt
from scipy.optimize import minimize, fmin_slsqp
from scipy.linalg import inv
#-----------------------------
# read in the data
# data is from Oxmetrics 7.0
#-----------------------------
nasdaq = pd.read_csv('nasdaq_ox_example.csv', \
index_col=1, parse_dates=True)
nasdaq_rtn = np.asarray(nasdaq['Nasdaq'])
#----------------------------
# lag array helper function
#----------------------------
def lag_array(series, lag, c=1):
# begin building lags
T = series.size
final = np.array([])
if type(lag)==int:
for i in xrange(1,lag+1):
if i==1:
final = series[lag-i:-i][np.newaxis].T
else:
final = np.hstack((final,series[lag-i:-i][np.newaxis].T))
elif type(lag)==list:
count = 0
for i in lag:
if count==0:
final = series[max(lag)-i:-i][np.newaxis].T
else:
final = np.hstack((final,series[max(lag)-i:-i][np.newaxis].T))
count += 1
if c==1 and final.size > 0:
final = np.hstack((np.ones(len(final))[np.newaxis].T, final))
elif c==1 and final.size == 0:
final = np.ones(T)
return final
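#----------------------------------------------------------
# worked example (editor's sketch): for series = [1,2,3,4,5],
# lag_array(series, 2, c=1) returns
#   [[1., 2., 1.],
#    [1., 3., 2.],
#    [1., 4., 3.]]
# i.e. a constant column plus lag-1 and lag-2 columns, with the
# first max(lag) observations dropped.
#----------------------------------------------------------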
#--------------------------
# determine the AR errors
#--------------------------
def AR_err(series, b, ar=[], c=0):
X = lag_array(series, ar, c)
T = X.shape[0]
Y = series[-T:]
e = Y - np.dot(X,b.T)
return e,T
#------------------------
# determine the spline
#------------------------
def spline(tau_parms, T, knots):
trend = np.array([t for t in xrange(1,T+1)])
tau = np.resize(np.repeat(trend,knots),(T,knots))
pts = np.array([round(T/knots*i,0) for i in xrange(knots)])
factors = 1.0*(tau - pts > 0)
tau = np.hstack(((trend*tau_parms[1]/T)[np.newaxis].T, tau_parms[2:]*factors*((tau - pts)/T)**2)) # scaled spline from Oxmetrics
#tau = np.hstack(((trend*tau_parms[1])[np.newaxis].T, tau_parms[2:]*factors*((tau - pts))**2)) # Engle-Rangel (2009) spline
tau = tau_parms[0]*np.exp(np.sum(tau,axis=1))
return tau
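#----------------------------------------------------------
# editor's note: the scaled spline above evaluates, for t = 1..T,
#   tau_t = tau_parms[0] * exp( tau_parms[1]*t/T
#           + sum_k tau_parms[2+k] * ((t - t_k)_+ / T)^2 )
# with knots t_k = round(T*k/knots), k = 0,...,knots-1, and
# (x)_+ = max(x, 0) implemented via the `factors` indicator.
#----------------------------------------------------------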
#-----------------------------------------------
# Two-sided jacobian (alternative central-difference implementation)
# step size is modeled on Eviews documentation
#-----------------------------------------------
def jacobian(parms, series, c, ar=[], knots=0, full_output=False):
r = 1.49e-8 # relative step size (sq. root of machine epsilon)
m = 10.0e-10 # minimum step size
# set up empty vectors with steps and gradients
s = np.zeros(len(parms),float)
grad = np.zeros(len(parms),float)
# calculate the gradients
for i in xrange(len(parms)):
s[i] = max(r*parms[i], m)
loglik_plus = __loglike__(parms+s, series=series, c=c, ar=ar, knots=knots, full_output = False)
loglik_minus = __loglike__(parms-s, series=series, c=c, ar=ar, knots=knots, full_output = False)
grad[i] =(loglik_plus - loglik_minus)/(2*s[i])
s[i] = 0.0
return grad
#-------------------------
# T x k gradient matrix
# from Sheppard (2014)
#-------------------------
def gradient(__loglike__, k, epsilon=1e-5):
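# editor's note: this helper evaluates the likelihood at the globally stored
# optimum result['x'], with the NASDAQ data and the AR(1), 2-knot spline spec
# hard-coded; the k per-parameter step sizes are taken from the first k
# entries of the per-observation log likelihoods scaled by epsilon.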
loglik, logliks, e, tau, gt, ht, T =__loglike__(result['x'], np.array(nasdaq_rtn), 1, [1], 2, full_output=True)
step = logliks*epsilon
scores = np.zeros((T, k))
for i in xrange(k):
h = step[i]
delta = np.zeros(k)
delta[i] = h
loglik, logliksplus, e, tau, gt, ht, T =__loglike__(result['x'] + delta, np.array(nasdaq_rtn), 1, [1], 2, full_output=True)
loglik, logliksminus, e, tau, gt, ht, T = __loglike__(result['x'] - delta, np.array(nasdaq_rtn), 1, [1], 2, full_output=True)
scores[:,i] = (logliksplus - logliksminus)/(2*h)
return scores
#-----------------------------
# 2-sided Hessian function
# from Sheppard (2014)
#------------------------------
def hessian_2sided(fun, theta, args, epsilon=1e-05):
f = fun(theta, *args)
h = epsilon*np.abs(theta)
thetah = theta + h
h = thetah - theta
K = np.size(theta,0)
h = np.diag(h)
fp = np.zeros(K)
fm = np.zeros(K)
for i in xrange(K):
fp[i] = fun(theta+h[i], *args)
fm[i] = fun(theta-h[i], *args)
fpp = np.zeros((K,K))
fmm = np.zeros((K,K))
for i in xrange(K):
for j in xrange(i,K):
fpp[i,j] = fun(theta + h[i] + h[j], *args)
fpp[j,i] = fpp[i,j]
fmm[i,j] = fun(theta - h[i] - h[j], *args)
fmm[j,i] = fmm[i,j]
hh = (np.diag(h))
hh = hh.reshape((K,1))
hh = np.dot(hh,hh.T)
H = np.zeros((K,K))
for i in xrange(K):
for j in xrange(i,K):
H[i,j] = (fpp[i,j] - fp[i] - fp[j] + f
+ f - fm[i] - fm[j] + fmm[i,j])/hh[i,j]/2
H[j,i] = H[i,j]
return H
#--------------------------------------
# define the non-negativity constraint
# Tried this; doesn't work
#--------------------------------------
def sp_constraint(parms, series, c, ar, knots, full_output=False):
# break up the parameter vector
b = parms[:len(ar)+c]
tau_parms = parms[-knots-2:]
g_parms = parms[:-knots-2][-2:]
# AR model errors
e, T = AR_err(series, b, ar, c)
# determine the spline
tau = spline(tau_parms, T, knots)
# check non-negative values
non_neg = 1.0*(tau <=0).sum()
return non_neg
#------------------------------
# determine the log likelihood
#------------------------------
def __loglike__(parms, series, c, ar, knots, full_output=False):
# break up the parameter vector
b = parms[:len(ar)+c]
tau_parms = parms[-knots-2:]
g_parms = parms[:-knots-2][-2:]
# AR model errors
e, T = AR_err(series, b, ar, c)
# squared residuals
e2 = e**2
# determine the spline
tau = spline(tau_parms, T, knots)
# determine the short run component
alpha = g_parms[0]
beta = g_parms[1]
gt = np.array([tau[0]])
for t in xrange(1,tau.size):
gt = np.append(gt, (1-alpha-beta) + alpha*(e2[t-1]/tau[t-1]) + beta*gt[t-1])
# conditional variance
ht = np.multiply(tau,gt)
# log likelihood
logliks = 0.5*(np.log(2*pi)+np.log(ht)+(e2/ht))
loglik = logliks.sum()
#if isnan(logliks.sum()) or isinf(logliks.sum()):
# loglik = 1E10
#else:
# loglik = logliks.sum()
#print loglik
if full_output == True:
return -loglik, logliks, e, tau, gt, ht, T
else:
return loglik
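#----------------------------------------------------------------
# editor's note: the spline-GARCH decomposition used above is
#   h_t = tau_t * g_t
#   g_t = (1 - alpha - beta) + alpha * e_{t-1}^2 / tau_{t-1} + beta * g_{t-1}
# and the function returns the negative Gaussian log likelihood
#   sum_t 0.5*( log(2*pi) + log(h_t) + e_t^2 / h_t )
# which minimize() drives down below.
#----------------------------------------------------------------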
#-------------------------------
# initial values from oxmetrics
#-------------------------------
x0 = np.array([0.01, 0.01, 0.1, 0.8, 1.59189, 0.0, 0.0, 0.0])
#---------------------
# estimate the model
#---------------------
# use the two-sided jacobian add jac=jacobian
result = minimize(__loglike__, x0=x0, method='SLSQP',\
args = (np.array(nasdaq_rtn), 1, [1], 2)) ## Sequential Least Sq
#result = fmin_slsqp(__loglike__, x0=x0, full_output=True, \
# args = (np.array(nasdaq_rtn), 1, [1], 2)) ## main SLSQP function
# recover the components
loglik, logliks, e, tau, gt, ht, T = __loglike__(result['x'], np.array(nasdaq_rtn), 1, [1], 2, full_output=True)
#----------------------------
# standard errors
#----------------------------
scores = gradient(__loglike__, result['x'].size)
H = hessian_2sided(__loglike__, result['x'], (np.array(nasdaq_rtn), 1, [1], 2, False))
# outer product of gradient standard errors
OPG = np.dot(scores.T,scores)/T
vcv_opg = inv(OPG)/T
se_opg = np.diag(vcv_opg)**0.5
t_opg = result['x']/se_opg
# second derivatives
vcv_H = inv(H/T)/T
se_H = np.diag(vcv_H)**0.5
t_H = result['x']/se_H
# sandwich form
vcv_bw = np.dot(inv(H/T), np.dot(OPG,inv(H/T)))/T
se_bw = np.diag(vcv_bw)**0.5
t_bw = result['x']/se_bw
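# editor's note: three asymptotic covariance estimators are formed above
# (all scaled by 1/T):
#   OPG      : inv(S'S/T),              with S the T x k score matrix
#   Hessian  : inv(H/T)
#   sandwich : inv(H/T) (S'S/T) inv(H/T)
# t-stats are the parameter estimates divided by the respective std. errors.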
#-------------------------
# plot the NASDAQ returns
#-------------------------
plt.plot(nasdaq['Nasdaq'], 'b-')
plt.show()
#------------------
# plot the results
#------------------
t = np.array([t for t in xrange(T)])
# long run variance component
plt.plot(t, tau,'r-')
plt.show()
# short-run component
plt.plot(t, gt, 'r-')
plt.show()
# total variance
plt.plot(t, gt*tau, 'r-')
plt.show()
|
|
#!/usr/bin/env python
# Copyright (c) 2008 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import platform
import sys
from PyQt4.QtCore import (PYQT_VERSION_STR, QFile, QFileInfo, QSettings,
QT_VERSION_STR, QTimer, QVariant, Qt, SIGNAL)
from PyQt4.QtGui import (QAction, QApplication, QFileDialog, QIcon,
QKeySequence, QMainWindow, QMessageBox, QShortcut, QTableWidget,
QTableWidgetItem)
import addeditmoviedlg
import moviedata
import qrc_resources
__version__ = "1.0.0"
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.movies = moviedata.MovieContainer()
self.table = QTableWidget()
self.setCentralWidget(self.table)
status = self.statusBar()
status.setSizeGripEnabled(False)
status.showMessage("Ready", 5000)
fileNewAction = self.createAction("&New...", self.fileNew,
QKeySequence.New, "filenew",
"Create a movie data file")
fileOpenAction = self.createAction("&Open...", self.fileOpen,
QKeySequence.Open, "fileopen",
"Open an existing movie data file")
fileSaveAction = self.createAction("&Save", self.fileSave,
QKeySequence.Save, "filesave", "Save the movie data")
fileSaveAsAction = self.createAction("Save &As...",
self.fileSaveAs, icon="filesaveas",
tip="Save the movie data using a new name")
fileImportDOMAction = self.createAction(
"&Import from XML (DOM)...", self.fileImportDOM,
tip="Import the movie data from an XML file")
fileImportSAXAction = self.createAction(
"I&mport from XML (SAX)...", self.fileImportSAX,
tip="Import the movie data from an XML file")
fileExportXmlAction = self.createAction(
"E&xport as XML...", self.fileExportXml,
tip="Export the movie data to an XML file")
fileQuitAction = self.createAction("&Quit", self.close,
"Ctrl+Q", "filequit", "Close the application")
editAddAction = self.createAction("&Add...", self.editAdd,
"Ctrl+A", "editadd", "Add data about a movie")
editEditAction = self.createAction("&Edit...", self.editEdit,
"Ctrl+E", "editedit", "Edit the current movie's data")
editRemoveAction = self.createAction("&Remove...",
self.editRemove, "Del", "editdelete",
"Remove a movie's data")
helpAboutAction = self.createAction("&About", self.helpAbout,
tip="About the application")
fileMenu = self.menuBar().addMenu("&File")
self.addActions(fileMenu, (fileNewAction, fileOpenAction,
fileSaveAction, fileSaveAsAction, None,
fileImportDOMAction, fileImportSAXAction,
fileExportXmlAction, None, fileQuitAction))
editMenu = self.menuBar().addMenu("&Edit")
self.addActions(editMenu, (editAddAction, editEditAction,
editRemoveAction))
helpMenu = self.menuBar().addMenu("&Help")
self.addActions(helpMenu, (helpAboutAction,))
fileToolbar = self.addToolBar("File")
fileToolbar.setObjectName("FileToolBar")
self.addActions(fileToolbar, (fileNewAction, fileOpenAction,
fileSaveAsAction))
editToolbar = self.addToolBar("Edit")
editToolbar.setObjectName("EditToolBar")
self.addActions(editToolbar, (editAddAction, editEditAction,
editRemoveAction))
self.connect(self.table,
SIGNAL("itemDoubleClicked(QTableWidgetItem*)"),
self.editEdit)
QShortcut(QKeySequence("Return"), self.table, self.editEdit)
settings = QSettings()
self.restoreGeometry(
settings.value("MainWindow/Geometry").toByteArray())
self.restoreState(settings.value("MainWindow/State").toByteArray())
self.setWindowTitle("My Movies")
QTimer.singleShot(0, self.loadInitialFile)
def createAction(self, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/{0}.png".format(icon)))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
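# Editor's note: createAction() centralises QAction setup; for example the
# hypothetical call
#   self.createAction("&Refresh", self.refresh, "F5", "refresh", "Reload the table")
# would wire up the icon, shortcut, tooltip/status tip and the triggered()
# signal in one step, as the file/edit actions below do.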
@staticmethod
def addActions(target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def closeEvent(self, event):
if self.okToContinue():
settings = QSettings()
settings.setValue("LastFile",
QVariant(self.movies.filename()))
settings.setValue("MainWindow/Geometry",
QVariant(self.saveGeometry()))
settings.setValue("MainWindow/State",
QVariant(self.saveState()))
else:
event.ignore()
def okToContinue(self):
if self.movies.isDirty():
reply = QMessageBox.question(self,
"My Movies - Unsaved Changes",
"Save unsaved changes?",
QMessageBox.Yes|QMessageBox.No|QMessageBox.Cancel)
if reply == QMessageBox.Cancel:
return False
elif reply == QMessageBox.Yes:
return self.fileSave()
return True
def loadInitialFile(self):
settings = QSettings()
fname = settings.value("LastFile").toString()
if fname and QFile.exists(fname):
_, msg = self.movies.load(fname)
self.statusBar().showMessage(msg, 5000)
self.updateTable()
def updateTable(self, current=None):
self.table.clear()
self.table.setRowCount(len(self.movies))
self.table.setColumnCount(6)
self.table.setHorizontalHeaderLabels(["Title", "Year", "Mins",
"Acquired", "Location", "Notes"])
self.table.setAlternatingRowColors(True)
self.table.setEditTriggers(QTableWidget.NoEditTriggers)
self.table.setSelectionBehavior(QTableWidget.SelectRows)
self.table.setSelectionMode(QTableWidget.SingleSelection)
selected = None
for row, movie in enumerate(self.movies):
item = QTableWidgetItem(movie.title)
if current is not None and current == id(movie):
selected = item
item.setData(Qt.UserRole,
QVariant(long(id(movie))))
self.table.setItem(row, 0, item)
year = movie.year
if year != movie.UNKNOWNYEAR:
item = QTableWidgetItem("{0}".format(year))
item.setTextAlignment(Qt.AlignCenter)
self.table.setItem(row, 1, item)
minutes = movie.minutes
if minutes != movie.UNKNOWNMINUTES:
item = QTableWidgetItem("{0}".format(minutes))
item.setTextAlignment(Qt.AlignRight|Qt.AlignVCenter)
self.table.setItem(row, 2, item)
item = QTableWidgetItem(movie.acquired.toString(
moviedata.DATEFORMAT))
item.setTextAlignment(Qt.AlignRight|
Qt.AlignVCenter)
self.table.setItem(row, 3, item)
self.table.setItem(row, 4, QTableWidgetItem(movie.location))
notes = movie.notes
if notes.length() > 40:
notes = notes.left(39) + "..."
self.table.setItem(row, 5, QTableWidgetItem(notes))
self.table.resizeColumnsToContents()
if selected is not None:
selected.setSelected(True)
self.table.setCurrentItem(selected)
self.table.scrollToItem(selected)
def fileNew(self):
if not self.okToContinue():
return
self.movies.clear()
self.statusBar().clearMessage()
self.updateTable()
def fileOpen(self):
if not self.okToContinue():
return
path = (QFileInfo(self.movies.filename()).path()
if not self.movies.filename().isEmpty() else ".")
fname = QFileDialog.getOpenFileName(self,
"My Movies - Load Movie Data", path,
"My Movies data files ({0})".format(self.movies.formats()))
if not fname.isEmpty():
_, msg = self.movies.load(fname)
self.statusBar().showMessage(msg, 5000)
self.updateTable()
def fileSave(self):
if self.movies.filename().isEmpty():
return self.fileSaveAs()
else:
ok, msg = self.movies.save()
self.statusBar().showMessage(msg, 5000)
return ok
def fileSaveAs(self):
fname = (self.movies.filename()
if not self.movies.filename().isEmpty() else ".")
fname = QFileDialog.getSaveFileName(self,
"My Movies - Save Movie Data", fname,
"My Movies data files ({0})".format(self.movies.formats()))
if not fname.isEmpty():
if not fname.contains("."):
fname += ".mqb"
ok, msg = self.movies.save(fname)
self.statusBar().showMessage(msg, 5000)
return ok
return False
def fileImportDOM(self):
self.fileImport("dom")
def fileImportSAX(self):
self.fileImport("sax")
def fileImport(self, format):
if not self.okToContinue():
return
path = (QFileInfo(self.movies.filename()).path()
if not self.movies.filename().isEmpty() else ".")
fname = QFileDialog.getOpenFileName(self,
"My Movies - Import Movie Data", path,
"My Movies XML files (*.xml)")
if not fname.isEmpty():
if format == "dom":
_, msg = self.movies.importDOM(fname)
else:
_, msg = self.movies.importSAX(fname)
self.statusBar().showMessage(msg, 5000)
self.updateTable()
def fileExportXml(self):
fname = self.movies.filename()
if fname.isEmpty():
fname = "."
else:
i = fname.lastIndexOf(".")
if i > 0:
fname = fname.left(i)
fname += ".xml"
fname = QFileDialog.getSaveFileName(self,
"My Movies - Export Movie Data", fname,
"My Movies XML files (*.xml)")
if not fname.isEmpty():
if not fname.contains("."):
fname += ".xml"
_, msg = self.movies.exportXml(fname)
self.statusBar().showMessage(msg, 5000)
def editAdd(self):
form = addeditmoviedlg.AddEditMovieDlg(self.movies, None,
self)
if form.exec_():
self.updateTable(id(form.movie))
def editEdit(self):
movie = self.currentMovie()
if movie is not None:
form = addeditmoviedlg.AddEditMovieDlg(self.movies,
movie, self)
if form.exec_():
self.updateTable(id(movie))
def editRemove(self):
movie = self.currentMovie()
if movie is not None:
year = (" {0}".format(movie.year)
if movie.year != movie.UNKNOWNYEAR else "")
if (QMessageBox.question(self,
"My Movies - Delete Movie",
"Delete Movie `{0}' {1}?".format(
movie.title, year),
QMessageBox.Yes|QMessageBox.No) ==
QMessageBox.Yes):
self.movies.delete(movie)
self.updateTable()
def currentMovie(self):
row = self.table.currentRow()
if row > -1:
item = self.table.item(row, 0)
id = item.data(Qt.UserRole).toLongLong()[0]
return self.movies.movieFromId(id)
return None
def helpAbout(self):
QMessageBox.about(self, "My Movies - About",
"""<b>My Movies</b> v {0}
<p>Copyright © 2008 Qtrac Ltd.
All rights reserved.
<p>This application can be used to view some basic
information about movies and to load and save the
movie data in a variety of custom file formats.
<p>Python {1} - Qt {2} - PyQt {3} on {4}""".format(
__version__, platform.python_version(),
QT_VERSION_STR, PYQT_VERSION_STR,
platform.system()))
def main():
app = QApplication(sys.argv)
app.setOrganizationName("Qtrac Ltd.")
app.setOrganizationDomain("qtrac.eu")
app.setApplicationName("My Movies")
app.setWindowIcon(QIcon(":/icon.png"))
form = MainWindow()
form.show()
app.exec_()
main()
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import six
import re, copy, os
import frappe
from frappe import _
from frappe.utils import now, cint
from frappe.model import no_value_fields, default_fields
from frappe.model.document import Document
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
from frappe.desk.notifications import delete_notification_count_for
from frappe.modules import make_boilerplate
from frappe.model.db_schema import validate_column_name, validate_column_length, type_map
import frappe.website.render
# imports - third-party imports
import pymysql
from pymysql.constants import ER
class InvalidFieldNameError(frappe.ValidationError): pass
form_grid_templates = {
"fields": "templates/form_grid/fields.html"
}
class DocType(Document):
def get_feed(self):
return self.name
def validate(self):
"""Validate DocType before saving.
- Check if developer mode is set.
- Validate series
- Check fieldnames (duplication etc)
- Clear permission table for child tables
- Add `amended_from` and `amended_by` if Amendable"""
self.check_developer_mode()
self.validate_name()
if self.issingle:
self.allow_import = 0
self.is_submittable = 0
self.istable = 0
elif self.istable:
self.allow_import = 0
self.permissions = []
self.scrub_field_names()
self.scrub_options_in_select()
self.set_default_in_list_view()
self.validate_series()
self.validate_document_type()
validate_fields(self)
if self.istable:
# no permission records for child table
self.permissions = []
else:
validate_permissions(self)
self.make_amendable()
self.validate_website()
if not self.is_new():
self.before_update = frappe.get_doc('DocType', self.name)
if not self.is_new():
self.setup_fields_to_fetch()
if self.default_print_format and not self.custom:
frappe.throw(_('Standard DocType cannot have default print format, use Customize Form'))
def set_default_in_list_view(self):
'''Set default in-list-view for first 4 mandatory fields'''
if not [d.fieldname for d in self.fields if d.in_list_view]:
cnt = 0
for d in self.fields:
if d.reqd and not d.hidden and not d.fieldtype == "Table":
d.in_list_view = 1
cnt += 1
if cnt == 4: break
def check_developer_mode(self):
"""Throw exception if not developer mode or via patch"""
if frappe.flags.in_patch or frappe.flags.in_test:
return
if not frappe.conf.get("developer_mode") and not self.custom:
frappe.throw(_("Not in Developer Mode! Set in site_config.json or make 'Custom' DocType."))
def setup_fields_to_fetch(self):
'''Setup query to update values for newly set fetch values'''
try:
old_meta = frappe.get_meta(frappe.get_doc('DocType', self.name), cached=False)
old_fields_to_fetch = [df.fieldname for df in old_meta.get_fields_to_fetch()]
except frappe.DoesNotExistError:
old_fields_to_fetch = []
new_meta = frappe.get_meta(self, cached=False)
self.flags.update_fields_to_fetch_queries = []
if set(old_fields_to_fetch) != set([df.fieldname for df in new_meta.get_fields_to_fetch()]):
for df in new_meta.get_fields_to_fetch():
if df.fieldname not in old_fields_to_fetch:
link_fieldname, source_fieldname = df.options.split('.', 1)
link_df = new_meta.get_field(link_fieldname)
self.flags.update_fields_to_fetch_queries.append('''update
`tab{link_doctype}` source,
`tab{doctype}` target
set
target.`{fieldname}` = source.`{source_fieldname}`
where
target.`{link_fieldname}` = source.name
and ifnull(target.`{fieldname}`, '')="" '''.format(
link_doctype = link_df.options,
source_fieldname = source_fieldname,
doctype = self.name,
fieldname = df.fieldname,
link_fieldname = link_fieldname
))
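# editor's note (hypothetical example): for a new fetch field `customer_name`
# on this DocType with options "customer.customer_name", where `customer` is a
# Link field to the Customer DocType, the generated query looks like:
#   update `tabCustomer` source, `tab<this DocType>` target
#   set target.`customer_name` = source.`customer_name`
#   where target.`customer` = source.name
#   and ifnull(target.`customer_name`, '')=""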
def update_fields_to_fetch(self):
'''Update fetch values based on queries setup'''
if self.flags.update_fields_to_fetch_queries:
for query in self.flags.update_fields_to_fetch_queries:
frappe.db.sql(query)
def validate_document_type(self):
if self.document_type=="Transaction":
self.document_type = "Document"
if self.document_type=="Master":
self.document_type = "Setup"
def validate_website(self):
"""Ensure that website generator has field 'route'"""
if self.has_web_view:
# route field must be present
if not 'route' in [d.fieldname for d in self.fields]:
frappe.throw('Field "route" is mandatory for Web Views', title='Missing Field')
# clear website cache
frappe.website.render.clear_cache()
def change_modified_of_parent(self):
"""Change the timestamp of parent DocType if the current one is a child to clear caches."""
if frappe.flags.in_import:
return
parent_list = frappe.db.sql("""SELECT parent
from tabDocField where fieldtype="Table" and options=%s""", self.name)
for p in parent_list:
frappe.db.sql('UPDATE tabDocType SET modified=%s WHERE `name`=%s', (now(), p[0]))
def scrub_field_names(self):
"""Sluggify fieldnames if not set from Label."""
restricted = ('name','parent','creation','modified','modified_by',
'parentfield','parenttype','file_list', 'flags', 'docstatus')
for d in self.get("fields"):
if d.fieldtype:
if (not getattr(d, "fieldname", None)):
if d.label:
d.fieldname = d.label.strip().lower().replace(' ','_')
if d.fieldname in restricted:
d.fieldname = d.fieldname + '1'
else:
d.fieldname = d.fieldtype.lower().replace(" ","_") + "_" + str(d.idx)
# fieldnames should be lowercase
d.fieldname = d.fieldname.lower()
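# editor's note (illustrative): a field with no fieldname and label "Posting Date"
# becomes "posting_date"; a label colliding with a restricted name such as
# "Parent" becomes "parent1"; an unlabeled Data field at idx 3 becomes "data_3".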
def scrub_options_in_select(self):
"""Strip options for whitespaces"""
for field in self.fields:
if field.fieldtype == "Select" and field.options is not None:
new_options = ""
for option in field.options.split("\n"):
new_options += option.strip()
new_options += "\n"
new_options = new_options.rstrip("\n")
field.options = new_options
def validate_series(self, autoname=None, name=None):
"""Validate if `autoname` property is correctly set."""
if not autoname: autoname = self.autoname
if not name: name = self.name
if not autoname and self.get("fields", {"fieldname":"naming_series"}):
self.autoname = "naming_series:"
# validate field name if autoname field:fieldname is used
if autoname and autoname.startswith('field:'):
field = autoname.split(":")[1]
if not field or field not in [ df.fieldname for df in self.fields ]:
frappe.throw(_("Invalid fieldname '{0}' in autoname".format(field)))
if autoname and (not autoname.startswith('field:')) \
and (not autoname.startswith('eval:')) \
and (not autoname.lower() in ('prompt', 'hash')) \
and (not autoname.startswith('naming_series:')):
prefix = autoname.split('.')[0]
used_in = frappe.db.sql('select name from tabDocType where substring_index(autoname, ".", 1) = %s and name!=%s', (prefix, name))
if used_in:
frappe.throw(_("Series {0} already used in {1}").format(prefix, used_in[0][0]))
def on_update(self):
"""Update database schema, make controller templates if `custom` is not set and clear cache."""
from frappe.model.db_schema import updatedb
self.delete_duplicate_custom_fields()
updatedb(self.name, self)
self.change_modified_of_parent()
make_module_and_roles(self)
self.update_fields_to_fetch()
from frappe import conf
if not self.custom and not (frappe.flags.in_import or frappe.flags.in_test) and conf.get('developer_mode'):
self.export_doc()
self.make_controller_template()
if self.has_web_view:
self.set_base_class_for_controller()
# update index
if not self.custom:
self.run_module_method("on_doctype_update")
if self.flags.in_insert:
self.run_module_method("after_doctype_insert")
delete_notification_count_for(doctype=self.name)
frappe.clear_cache(doctype=self.name)
if not frappe.flags.in_install and hasattr(self, 'before_update'):
self.sync_global_search()
# clear from local cache
if self.name in frappe.local.meta_cache:
del frappe.local.meta_cache[self.name]
def delete_duplicate_custom_fields(self):
if not (frappe.db.table_exists(self.name) and frappe.db.table_exists("Custom Field")):
return
fields = [d.fieldname for d in self.fields if d.fieldtype in type_map]
frappe.db.sql('''delete from
`tabCustom Field`
where
dt = {0} and fieldname in ({1})
'''.format('%s', ', '.join(['%s'] * len(fields))), tuple([self.name] + fields), as_dict=True)
def sync_global_search(self):
'''If global search settings are changed, rebuild search properties for this table'''
global_search_fields_before_update = [d.fieldname for d in
self.before_update.fields if d.in_global_search]
if self.before_update.show_name_in_global_search:
global_search_fields_before_update.append('name')
global_search_fields_after_update = [d.fieldname for d in
self.fields if d.in_global_search]
if self.show_name_in_global_search:
global_search_fields_after_update.append('name')
if set(global_search_fields_before_update) != set(global_search_fields_after_update):
now = (not frappe.request) or frappe.flags.in_test or frappe.flags.in_install
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype',
now=now, doctype=self.name)
def set_base_class_for_controller(self):
'''Updates the controller class to subclass from `WebsiteGenerator`,
if it is a subclass of `Document`'''
controller_path = frappe.get_module_path(frappe.scrub(self.module),
'doctype', frappe.scrub(self.name), frappe.scrub(self.name) + '.py')
with open(controller_path, 'r') as f:
code = f.read()
class_string = '\nclass {0}(Document)'.format(self.name.replace(' ', ''))
if '\nfrom frappe.model.document import Document' in code and class_string in code:
code = code.replace('from frappe.model.document import Document',
'from frappe.website.website_generator import WebsiteGenerator')
code = code.replace('class {0}(Document)'.format(self.name.replace(' ', '')),
'class {0}(WebsiteGenerator)'.format(self.name.replace(' ', '')))
with open(controller_path, 'w') as f:
f.write(code)
def run_module_method(self, method):
from frappe.modules import load_doctype_module
module = load_doctype_module(self.name, self.module)
if hasattr(module, method):
getattr(module, method)()
def before_rename(self, old, new, merge=False):
"""Throw exception if merge. DocTypes cannot be merged."""
if not self.custom and frappe.session.user != "Administrator":
frappe.throw(_("DocType can only be renamed by Administrator"))
self.check_developer_mode()
self.validate_name(new)
if merge:
frappe.throw(_("DocType can not be merged"))
def after_rename(self, old, new, merge=False):
"""Change table name using `RENAME TABLE` if table exists. Or update
`doctype` property for Single type."""
if self.issingle:
frappe.db.sql("""update tabSingles set doctype=%s where doctype=%s""", (new, old))
frappe.db.sql("""update tabSingles set value=%s
where doctype=%s and field='name' and value = %s""", (new, new, old))
else:
frappe.db.sql("rename table `tab%s` to `tab%s`" % (old, new))
def before_reload(self):
"""Preserve naming series changes in Property Setter."""
if not (self.issingle and self.istable):
self.preserve_naming_series_options_in_property_setter()
def preserve_naming_series_options_in_property_setter(self):
"""Preserve naming_series as property setter if it does not exist"""
naming_series = self.get("fields", {"fieldname": "naming_series"})
if not naming_series:
return
# check if at least 1 record exists
if not (frappe.db.table_exists(self.name) and frappe.db.sql("select name from `tab{}` limit 1".format(self.name))):
return
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.name,
"property": "options", "field_name": "naming_series"})
if not existing_property_setter:
make_property_setter(self.name, "naming_series", "options", naming_series[0].options, "Text", validate_fields_for_doctype=False)
if naming_series[0].default:
make_property_setter(self.name, "naming_series", "default", naming_series[0].default, "Text", validate_fields_for_doctype=False)
def export_doc(self):
"""Export to standard folder `[module]/doctype/[name]/[name].json`."""
from frappe.modules.export_file import export_to_files
export_to_files(record_list=[['DocType', self.name]], create_init=True)
def import_doc(self):
"""Import from standard folder `[module]/doctype/[name]/[name].json`."""
from frappe.modules.import_module import import_from_files
import_from_files(record_list=[[self.module, 'doctype', self.name]])
def make_controller_template(self):
"""Make boilerplate controller template."""
make_boilerplate("controller._py", self)
if not self.istable:
make_boilerplate("test_controller._py", self.as_dict())
if not self.istable:
make_boilerplate("controller.js", self.as_dict())
#make_boilerplate("controller_list.js", self.as_dict())
if not os.path.exists(frappe.get_module_path(frappe.scrub(self.module),
'doctype', frappe.scrub(self.name), 'tests')):
make_boilerplate("test_controller.js", self.as_dict())
if self.has_web_view:
templates_path = frappe.get_module_path(frappe.scrub(self.module), 'doctype', frappe.scrub(self.name), 'templates')
if not os.path.exists(templates_path):
os.makedirs(templates_path)
make_boilerplate('templates/controller.html', self.as_dict())
make_boilerplate('templates/controller_row.html', self.as_dict())
def make_amendable(self):
"""If is_submittable is set, add amended_from docfields."""
if self.is_submittable:
if not frappe.db.sql("""select name from tabDocField
where fieldname = 'amended_from' and parent = %s""", self.name):
self.append("fields", {
"label": "Amended From",
"fieldtype": "Link",
"fieldname": "amended_from",
"options": self.name,
"read_only": 1,
"print_hide": 1,
"no_copy": 1
})
def get_max_idx(self):
"""Returns the highest `idx`"""
max_idx = frappe.db.sql("""select max(idx) from `tabDocField` where parent = %s""",
self.name)
return max_idx and max_idx[0][0] or 0
def validate_name(self, name=None):
if not name:
name = self.name
# a DocType's name should not start with a number or underscore
# and should only contain letters, numbers and underscore
if six.PY2:
is_a_valid_name = re.match("^(?![\W])[^\d_\s][\w ]+$", name)
else:
is_a_valid_name = re.match("^(?![\W])[^\d_\s][\w ]+$", name, flags = re.ASCII)
if not is_a_valid_name:
frappe.throw(_("DocType's name should start with a letter and it can only consist of letters, numbers, spaces and underscores"), frappe.NameError)
def validate_fields_for_doctype(doctype):
doc = frappe.get_doc("DocType", doctype)
doc.delete_duplicate_custom_fields()
validate_fields(frappe.get_meta(doctype, cached=False))
# this is separate because it is also called via custom field
def validate_fields(meta):
"""Validate doctype fields. Checks
1. There are no illegal characters in fieldnames
2. If fieldnames are unique.
3. Validate column length.
4. Fields that do not have database columns are not mandatory.
5. `Link` and `Table` options are valid.
6. **Hidden** and **Mandatory** are not set simultaneously.
7. `Check` type field has default as 0 or 1.
8. `Dynamic Links` are correctly defined.
9. Precision is set in numeric fields and is between 1 & 6.
10. Fold is not at the end (if set).
11. `search_fields` are valid.
12. `title_field` and title field pattern are valid.
13. `unique` check is only valid for Data, Link and Read Only fieldtypes.
14. `unique` cannot be checked if there exist non-unique values.
:param meta: `frappe.model.meta.Meta` object to check."""
def check_illegal_characters(fieldname):
validate_column_name(fieldname)
def check_unique_fieldname(fieldname):
duplicates = list(filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields)))
if len(duplicates) > 1:
frappe.throw(_("Fieldname {0} appears multiple times in rows {1}").format(fieldname, ", ".join(duplicates)))
def check_fieldname_length(fieldname):
validate_column_length(fieldname)
def check_illegal_mandatory(d):
if (d.fieldtype in no_value_fields) and d.fieldtype!="Table" and d.reqd:
frappe.throw(_("Field {0} of type {1} cannot be mandatory").format(d.label, d.fieldtype))
def check_link_table_options(d):
if d.fieldtype in ("Link", "Table"):
if not d.options:
frappe.throw(_("Options required for Link or Table type field {0} in row {1}").format(d.label, d.idx))
if d.options=="[Select]" or d.options==d.parent:
return
if d.options != d.parent:
options = frappe.db.get_value("DocType", d.options, "name")
if not options:
frappe.throw(_("Options must be a valid DocType for field {0} in row {1}").format(d.label, d.idx))
elif not (options == d.options):
frappe.throw(_("Options {0} must be the same as doctype name {1} for the field {2}")
.format(d.options, options, d.label))
else:
# fix case
d.options = options
def check_hidden_and_mandatory(d):
if d.hidden and d.reqd and not d.default:
frappe.throw(_("Field {0} in row {1} cannot be hidden and mandatory without default").format(d.label, d.idx))
def check_width(d):
if d.fieldtype == "Currency" and cint(d.width) < 100:
frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx))
def check_in_list_view(d):
if d.in_list_view and (d.fieldtype in not_allowed_in_list_view):
frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx))
def check_in_global_search(d):
if d.in_global_search and d.fieldtype in no_value_fields:
frappe.throw(_("'In Global Search' not allowed for type {0} in row {1}")
.format(d.fieldtype, d.idx))
def check_dynamic_link_options(d):
if d.fieldtype=="Dynamic Link":
doctype_pointer = list(filter(lambda df: df.fieldname==d.options, fields))
if not doctype_pointer or (doctype_pointer[0].fieldtype not in ("Link", "Select")) \
or (doctype_pointer[0].fieldtype=="Link" and doctype_pointer[0].options!="DocType"):
frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'"))
def check_illegal_default(d):
if d.fieldtype == "Check" and d.default and d.default not in ('0', '1'):
frappe.throw(_("Default for 'Check' type of field must be either '0' or '1'"))
if d.fieldtype == "Select" and d.default and (d.default not in d.options.split("\n")):
frappe.throw(_("Default for {0} must be an option").format(d.fieldname))
def check_precision(d):
if d.fieldtype in ("Currency", "Float", "Percent") and d.precision is not None and not (1 <= cint(d.precision) <= 6):
frappe.throw(_("Precision should be between 1 and 6"))
def check_unique_and_text(d):
if meta.issingle:
d.unique = 0
d.search_index = 0
if getattr(d, "unique", False):
if d.fieldtype not in ("Data", "Link", "Read Only"):
frappe.throw(_("Fieldtype {0} for {1} cannot be unique").format(d.fieldtype, d.label))
if not d.get("__islocal"):
try:
has_non_unique_values = frappe.db.sql("""select `{fieldname}`, count(*)
from `tab{doctype}` where ifnull({fieldname}, '') != ''
group by `{fieldname}` having count(*) > 1 limit 1""".format(
doctype=d.parent, fieldname=d.fieldname))
except pymysql.InternalError as e:
if e.args and e.args[0] == ER.BAD_FIELD_ERROR:
# ignore if missing column, else raise
# this happens in case of Custom Field
pass
else:
raise
else:
# else of try block
if has_non_unique_values and has_non_unique_values[0][0]:
frappe.throw(_("Field '{0}' cannot be set as Unique as it has non-unique values").format(d.label))
if d.search_index and d.fieldtype in ("Text", "Long Text", "Small Text", "Code", "Text Editor"):
frappe.throw(_("Fieldtype {0} for {1} cannot be indexed").format(d.fieldtype, d.label))
def check_fold(fields):
fold_exists = False
for i, f in enumerate(fields):
if f.fieldtype=="Fold":
if fold_exists:
frappe.throw(_("There can be only one Fold in a form"))
fold_exists = True
if i < len(fields)-1:
nxt = fields[i+1]
if nxt.fieldtype != "Section Break":
frappe.throw(_("Fold must come before a Section Break"))
else:
frappe.throw(_("Fold can not be at the end of the form"))
def check_search_fields(meta, fields):
"""Throw exception if `search_fields` don't contain valid fields."""
if not meta.search_fields:
return
# No value fields should not be included in search field
search_fields = [field.strip() for field in (meta.search_fields or "").split(",")]
fieldtype_mapper = { field.fieldname: field.fieldtype \
for field in filter(lambda field: field.fieldname in search_fields, fields) }
for fieldname in search_fields:
fieldname = fieldname.strip()
if (fieldtype_mapper.get(fieldname) in no_value_fields) or \
(fieldname not in fieldname_list):
frappe.throw(_("Search field {0} is not valid").format(fieldname))
def check_title_field(meta):
"""Throw exception if `title_field` isn't a valid fieldname."""
if not meta.get("title_field"):
return
if meta.title_field not in fieldname_list:
frappe.throw(_("Title field must be a valid fieldname"), InvalidFieldNameError)
def _validate_title_field_pattern(pattern):
if not pattern:
return
for fieldname in re.findall("{(.*?)}", pattern, re.UNICODE):
if fieldname.startswith("{"):
# edge case when double curlies are used for escape
continue
if fieldname not in fieldname_list:
frappe.throw(_("{{{0}}} is not a valid fieldname pattern. It should be {{field_name}}.").format(fieldname),
InvalidFieldNameError)
df = meta.get("fields", filters={"fieldname": meta.title_field})[0]
if df:
_validate_title_field_pattern(df.options)
_validate_title_field_pattern(df.default)
def check_image_field(meta):
'''check image_field exists and is of type "Attach Image"'''
if not meta.image_field:
return
df = meta.get("fields", {"fieldname": meta.image_field})
if not df:
frappe.throw(_("Image field must be a valid fieldname"), InvalidFieldNameError)
if df[0].fieldtype != 'Attach Image':
frappe.throw(_("Image field must be of type Attach Image"), InvalidFieldNameError)
def check_is_published_field(meta):
if not meta.is_published_field:
return
if meta.is_published_field not in fieldname_list:
frappe.throw(_("Is Published Field must be a valid fieldname"), InvalidFieldNameError)
def check_timeline_field(meta):
if not meta.timeline_field:
return
if meta.timeline_field not in fieldname_list:
frappe.throw(_("Timeline field must be a valid fieldname"), InvalidFieldNameError)
df = meta.get("fields", {"fieldname": meta.timeline_field})[0]
if df.fieldtype not in ("Link", "Dynamic Link"):
frappe.throw(_("Timeline field must be a Link or Dynamic Link"), InvalidFieldNameError)
def check_sort_field(meta):
'''Validate that sort_field(s) is a valid field'''
if meta.sort_field:
sort_fields = [meta.sort_field]
if ',' in meta.sort_field:
sort_fields = [d.split()[0] for d in meta.sort_field.split(',')]
for fieldname in sort_fields:
if not fieldname in fieldname_list + list(default_fields):
frappe.throw(_("Sort field {0} must be a valid fieldname").format(fieldname),
InvalidFieldNameError)
def check_illegal_depends_on_conditions(docfield):
		'''Assignment operations are not allowed in depends_on conditions.'''
depends_on_fields = ["depends_on", "collapsible_depends_on"]
for field in depends_on_fields:
depends_on = docfield.get(field, None)
if depends_on and ("=" in depends_on) and \
re.match("""[\w\.:_]+\s*={1}\s*[\w\.@'"]+""", depends_on):
frappe.throw(_("Invalid {0} condition").format(frappe.unscrub(field)), frappe.ValidationError)
fields = meta.get("fields")
fieldname_list = [d.fieldname for d in fields]
not_allowed_in_list_view = list(copy.copy(no_value_fields))
not_allowed_in_list_view.append("Attach Image")
if meta.istable:
not_allowed_in_list_view.remove('Button')
for d in fields:
if not d.permlevel: d.permlevel = 0
if d.fieldtype != "Table": d.allow_bulk_edit = 0
if d.fieldtype == "Barcode": d.ignore_xss_filter = 1
if not d.fieldname:
frappe.throw(_("Fieldname is required in row {0}").format(d.idx))
d.fieldname = d.fieldname.lower()
check_illegal_characters(d.fieldname)
check_unique_fieldname(d.fieldname)
check_fieldname_length(d.fieldname)
check_illegal_mandatory(d)
check_link_table_options(d)
check_dynamic_link_options(d)
check_hidden_and_mandatory(d)
check_in_list_view(d)
check_in_global_search(d)
check_illegal_default(d)
check_unique_and_text(d)
check_illegal_depends_on_conditions(d)
check_fold(fields)
check_search_fields(meta, fields)
check_title_field(meta)
check_timeline_field(meta)
check_is_published_field(meta)
check_sort_field(meta)
check_image_field(meta)
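# Illustrative sketch (not part of the original module): how the checks above are
# typically exercised. "Library Member" is a hypothetical doctype name, and a
# running Frappe site is assumed so that frappe.get_meta() can load the meta.
def _example_validate_fields():
	import frappe
	meta = frappe.get_meta("Library Member", cached=False)
	# Raises via frappe.throw() if, for example, a fieldname is duplicated, a
	# Link field has no options, or a hidden mandatory field has no default.
	validate_fields(meta)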
def validate_permissions_for_doctype(doctype, for_remove=False):
"""Validates if permissions are set correctly."""
doctype = frappe.get_doc("DocType", doctype)
validate_permissions(doctype, for_remove)
# save permissions
for perm in doctype.get("permissions"):
perm.db_update()
clear_permissions_cache(doctype.name)
def clear_permissions_cache(doctype):
frappe.clear_cache(doctype=doctype)
delete_notification_count_for(doctype)
for user in frappe.db.sql_list("""select
distinct `tabHas Role`.parent
from
`tabHas Role`,
tabDocPerm
where tabDocPerm.parent = %s
and tabDocPerm.role = `tabHas Role`.role""", doctype):
frappe.clear_cache(user=user)
def validate_permissions(doctype, for_remove=False):
permissions = doctype.get("permissions")
if not permissions:
frappe.msgprint(_('No Permissions Specified'), alert=True, indicator='orange')
issingle = issubmittable = isimportable = False
if doctype:
issingle = cint(doctype.issingle)
issubmittable = cint(doctype.is_submittable)
isimportable = cint(doctype.allow_import)
def get_txt(d):
return _("For {0} at level {1} in {2} in row {3}").format(d.role, d.permlevel, d.parent, d.idx)
def check_atleast_one_set(d):
if not d.read and not d.write and not d.submit and not d.cancel and not d.create:
frappe.throw(_("{0}: No basic permissions set").format(get_txt(d)))
def check_double(d):
has_similar = False
similar_because_of = ""
for p in permissions:
if p.role==d.role and p.permlevel==d.permlevel and p!=d:
if p.apply_user_permissions==d.apply_user_permissions:
has_similar = True
similar_because_of = _("Apply User Permissions")
break
elif p.if_owner==d.if_owner:
similar_because_of = _("If Owner")
has_similar = True
break
if has_similar:
frappe.throw(_("{0}: Only one rule allowed with the same Role, Level and {1}")\
.format(get_txt(d), similar_because_of))
def check_level_zero_is_set(d):
if cint(d.permlevel) > 0 and d.role != 'All':
has_zero_perm = False
for p in permissions:
if p.role==d.role and (p.permlevel or 0)==0 and p!=d:
has_zero_perm = True
break
if not has_zero_perm:
frappe.throw(_("{0}: Permission at level 0 must be set before higher levels are set").format(get_txt(d)))
for invalid in ("create", "submit", "cancel", "amend"):
if d.get(invalid): d.set(invalid, 0)
def check_permission_dependency(d):
if d.cancel and not d.submit:
frappe.throw(_("{0}: Cannot set Cancel without Submit").format(get_txt(d)))
if (d.submit or d.cancel or d.amend) and not d.write:
frappe.throw(_("{0}: Cannot set Submit, Cancel, Amend without Write").format(get_txt(d)))
		if d.amend and not d.cancel:
			frappe.throw(_("{0}: Cannot set Amend without Cancel").format(get_txt(d)))
if d.get("import") and not d.create:
frappe.throw(_("{0}: Cannot set Import without Create").format(get_txt(d)))
def remove_rights_for_single(d):
if not issingle:
return
if d.report:
frappe.msgprint(_("Report cannot be set for Single types"))
d.report = 0
d.set("import", 0)
d.set("export", 0)
for ptype, label in (
("set_user_permissions", _("Set User Permissions")),
("apply_user_permissions", _("Apply User Permissions"))):
if d.get(ptype):
d.set(ptype, 0)
frappe.msgprint(_("{0} cannot be set for Single types").format(label))
def check_if_submittable(d):
if d.submit and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Submit if not Submittable").format(get_txt(d)))
elif d.amend and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Amend if not Submittable").format(get_txt(d)))
def check_if_importable(d):
if d.get("import") and not isimportable:
frappe.throw(_("{0}: Cannot set import as {1} is not importable").format(get_txt(d), doctype))
for d in permissions:
if not d.permlevel:
d.permlevel=0
check_atleast_one_set(d)
if not for_remove:
check_double(d)
check_permission_dependency(d)
check_if_submittable(d)
check_if_importable(d)
check_level_zero_is_set(d)
remove_rights_for_single(d)
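# Illustrative sketch (not part of the original module): validating and saving the
# permission rows of one doctype. "Library Member" is a hypothetical doctype name;
# a running Frappe site is assumed.
def _example_validate_permissions():
	# Checks every permission row (at least one basic right, no duplicate
	# role/level rule, a level-0 row before higher levels, submit/import flags
	# consistent with the doctype), then saves the rows and clears the cache.
	validate_permissions_for_doctype("Library Member")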
def make_module_and_roles(doc, perm_fieldname="permissions"):
"""Make `Module Def` and `Role` records if already not made. Called while installing."""
try:
if hasattr(doc,'restrict_to_domain') and doc.restrict_to_domain and \
not frappe.db.exists('Domain', doc.restrict_to_domain):
frappe.get_doc(dict(doctype='Domain', domain=doc.restrict_to_domain)).insert()
if not frappe.db.exists("Module Def", doc.module):
m = frappe.get_doc({"doctype": "Module Def", "module_name": doc.module})
m.app_name = frappe.local.module_app[frappe.scrub(doc.module)]
m.flags.ignore_mandatory = m.flags.ignore_permissions = True
m.insert()
default_roles = ["Administrator", "Guest", "All"]
roles = [p.role for p in doc.get("permissions") or []] + default_roles
for role in list(set(roles)):
if not frappe.db.exists("Role", role):
r = frappe.get_doc(dict(doctype= "Role", role_name=role, desk_access=1))
r.flags.ignore_mandatory = r.flags.ignore_permissions = True
r.insert()
except frappe.DoesNotExistError as e:
pass
except frappe.SQLError as e:
if e.args[0]==1146:
pass
else:
raise
def init_list(doctype):
"""Make boilerplate list views."""
doc = frappe.get_meta(doctype)
make_boilerplate("controller_list.js", doc)
make_boilerplate("controller_list.html", doc)
def check_if_fieldname_conflicts_with_methods(doctype, fieldname):
doc = frappe.get_doc({"doctype": doctype})
method_list = [method for method in dir(doc) if isinstance(method, str) and callable(getattr(doc, method))]
if fieldname in method_list:
frappe.throw(_("Fieldname {0} conflicting with meta object").format(fieldname))
|
|
"""Module for preparing data from NCBI. Most low layer module for manipulating data."""
import os
import pickle
from collections import defaultdict
from Bio import Entrez
from Bio import SeqIO
# TODO: move to init
CACHE_DIR = "../../Diploma/cache"
if not os.path.isdir(CACHE_DIR):
CACHE_DIR = "cache/"
if not os.path.isdir(CACHE_DIR):
os.makedirs(CACHE_DIR)
# ************ NCBI RECORD OPERATIONS ************ #
def get_gids(term="Viruses[Organism] AND srcdb_refseq[PROP] AND complete_genome"):
"""
Get genome IDs for given search term.
:param term: search term for NCBI query
:return: list of genome IDs for given term
"""
# term = "Viruses[Organism] AND srcdb_refseq[PROP] AND complete_genome"
handle = Entrez.esearch(db="nucleotide", term=term, retmax=100000)
record = Entrez.read(handle)
id_list = sorted(set(record["IdList"]))
print((record["Count"], len(record["IdList"]), len(id_list)))
return id_list
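# Illustrative usage sketch (not part of the original module). NCBI asks every
# Entrez client to identify itself, so set Entrez.email before querying; the
# address below is a placeholder.
def _example_fetch_ids():
    Entrez.email = "[email protected]"
    ids = get_gids()  # default term: RefSeq complete viral genomes
    print("fetched %d genome IDs" % len(ids))
    return ids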
def get_rec(rec_id):
"""
Get record for given genome id.
:param rec_id: genome id
:return: record
"""
try:
rec = pickle.load(open(os.path.join(CACHE_DIR, "%s.pkl.gz" % rec_id), "rb"))
    except IOError:  # also covers FileNotFoundError on Python 3
print(("downloading sequence id:", rec_id))
handle = Entrez.efetch(db="nucleotide", rettype="gb", id=rec_id)
rec = SeqIO.read(handle, "gb")
handle.close()
pickle.dump(rec, open(os.path.join(CACHE_DIR, "%s.pkl.gz" % rec_id), "wb"), -1)
print(("genome size:", len(rec.seq), rec.seq[:20] + "..."))
print(("Taxonomy:", rec.annotations['taxonomy']))
for a, t in list(rec.annotations.items()):
print((" %s: %s" % (a, str(t)[:15])))
print()
return rec
def get_gene(rec):
"""
Get record and return gene sequence.
:param rec: record
:return: gene sequence
"""
sequence = ""
for f in rec.features:
if f.type == "gene":
start = f.location.nofuzzy_start
end = f.location.nofuzzy_end
if f.location.strand == 1:
sequence += rec.seq[start:end]
else:
                # minus-strand feature: the gene is the reverse complement of the slice
                sequence += rec.seq[start:end].reverse_complement()
return str(sequence)
def load_oid_seq_classification(ids):
"""
Build dictionary of sequences and taxonomies for every genome ID.
:param ids: genome IDs
:return: sequences and taxonomy annotations dictionaries for every genome ID
"""
seq = defaultdict(list)
tax = {}
for oid in ids:
rec = get_rec(oid)
seq[oid] = str(rec.seq)
tax[oid] = ';'.join(rec.annotations["taxonomy"])
return seq, tax
# ************ TAXONOMY OPERATIONS ************ #
def rec_dd():
"""
Create dictionary of dictionaries to 'simulate' tree.
:return: dictionary of dictionaries
"""
return defaultdict(rec_dd)
def update_taxonomy(taxonomy, tax_path, genome_id):
"""
Create dictionary with taxonomy name and IDs of sequences which belongs to specific taxonomy.
:param taxonomy: current taxonomy
:param tax_path: taxonomy path
:param genome_id: genome_id
:return: updated taxonomy
"""
if not tax_path:
return taxonomy
tax = tax_path[0].lower()
if tax in taxonomy: # check if tax in taxonomy and update
# temp_taxonomy[tax]["data"].append(seq_record.annotations["gi"])
taxonomy[tax]["data"].append(genome_id)
# taxonomy[tax]["data"].append(get_gene(rec))
update_taxonomy(taxonomy[tax], tax_path[1:], genome_id)
else:
# temp_taxonomy[tax] = {"data": list({seq_record.annotations["gi"]})}
taxonomy[tax] = {"data": list({genome_id})}
# taxonomy[tax] = dict({"data": list({get_gene(rec)})})
temp = update_taxonomy(taxonomy[tax], tax_path[1:], genome_id)
if len(temp) > 1: # 1 = data, 2 = data + key
taxonomy = temp
return taxonomy
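# Illustrative sketch (not part of the original module): building a tiny taxonomy
# tree in place from two made-up lineages and inspecting the "data" lists. Note
# that update_taxonomy() lowercases the taxon names used as keys.
def _example_update_taxonomy():
    toy = rec_dd()
    update_taxonomy(toy, ["Viruses", "Ortervirales", "Retroviridae"], "id_1")
    update_taxonomy(toy, ["Viruses", "Ortervirales", "Caulimoviridae"], "id_2")
    print(toy["viruses"]["data"])  # ['id_1', 'id_2'] - every level keeps the IDs below it
    print(toy["viruses"]["ortervirales"]["retroviridae"]["data"])  # ['id_1']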
def filter_classification(rec, to_filter):
"""
Check if record is in filter list.
:param rec: record
:param to_filter: filter list
:return: bool
"""
in_to_filter = False
for temp_tax in rec.annotations["taxonomy"]:
temp_tax = temp_tax.lower().split()
for temp_tax_el in temp_tax:
if temp_tax_el in to_filter:
in_to_filter = True
print("filtered ", rec.annotations["taxonomy"])
return in_to_filter
def print_nice(taxonomy, level=0):
"""
Print taxonomy with tabs.
:param taxonomy: taxonomy
:param level: current level
:return:
"""
for i in sorted(taxonomy.keys()):
if i == "data":
if len(taxonomy) == 1:
return
else:
continue
else:
print(level * "\t", i.replace("->", "", 1), len(taxonomy[i]["data"]))
print_nice(taxonomy[i], level + 1)
def load_whole_taxonomy():
"""
Build taxonomy and get list ids and labels.
:return: data, label
"""
taxonomy = get_taxonomy(get_gids())
list_nodes = get_list_nodes_ids_labels(taxonomy)
data, labels = list(zip(*list_nodes))
for label in labels:
print(label)
label_number = -1
temp_l = []
label_n = []
for l in labels:
if l not in temp_l:
temp_l.append(l)
label_number += 1
label_n.append(label_number)
return data, label_n
def get_taxonomy(id_list, count=-1):
# call: python get_viral_sequence.py>log.out 2>log.err
# all virus sequences
# term = "Viruses[Organism] NOT srcdb_refseq[PROP] NOT cellular organisms[ORGN] AND
# nuccore genome samespecies[Filter] NOT nuccore genome[filter] NOT gbdiv syn[prop]"
# only reference (refSEQ) virues sequences
# see distinction between the two, here:
# http://www.ncbi.nlm.nih.gov/genomes/GenomesHome.cgi?taxid=10239&hopt=faq
"""
Build taxonomy from Entrez search.
:param id_list: list of genome ids we want to build taxonomy tree from
:param count: how many elements we want in taxonomy; -1 means whole taxonomy
:return: taxonomy
"""
taxonomy = rec_dd()
temp_count = 1
for genome_id in id_list:
try:
rec = get_rec(genome_id)
in_filter = filter_classification(rec, list({"bacteria", "unclassified", "unassigned"}))
if not in_filter:
update_taxonomy(taxonomy, rec.annotations["taxonomy"], genome_id)
if count != -1:
if temp_count == count:
break
temp_count += 1
except IOError as e:
# efetch - Raises an IOError exception if there's a network error.
# http://biopython.org/DIST/docs/api/Bio.Entrez-module.html
print("IOError raised...")
print(e)
except ValueError as v:
# http: // biopython.org / DIST / docs / api / Bio.SeqIO - module.html # read
print("problems with handling SeqIO...")
print(v)
except pickle.PicklingError as p:
# https://docs.python.org/2/library/pickle.html#pickle.PicklingError
print("problems with pickling object...")
print(p)
return taxonomy
def remove_small_nodes(taxonomy, threshold_size=100):
"""
Remove small nodes from dataset.
:param taxonomy: input taxonomy
    :param threshold_size: minimum number of genome IDs a node must contain to be kept
:return: output taxonomy
"""
if isinstance(taxonomy, (defaultdict, dict)):
taxonomy_keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in taxonomy_keys:
print(i, len(taxonomy[i]['data']))
if len(taxonomy[i]['data']) < threshold_size:
taxonomy.pop(i)
else:
                remove_small_nodes(taxonomy[i], threshold_size)
else:
return taxonomy
# ************ LIST OPERATIONS ************ #
def remove_lists(taxonomy):
"""
Remove all list nodes from taxonomy.
:param taxonomy: taxonomy
:return: taxonomy
"""
# check for recurse exit
if isinstance(taxonomy, (defaultdict, dict)):
for i in [x for x in list(taxonomy.keys()) if x != "data"]:
if set(taxonomy[i]) == set(list({"data"})):
# if parent has only one list node, remove it
# if len([x for x in taxonomy.keys() if x != "data"]) == 1:
taxonomy.pop(i)
continue
else:
remove_lists(taxonomy[i])
else:
return taxonomy
def get_list_nodes_unique(taxonomy, parent=""):
"""
Get taxonomy and return unique list nodes.
:param taxonomy: taxonomy
:param parent: parent of current node
:return: unique list nodes
"""
# checked by hand and it works as expected
list_nodes = list()
keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in keys:
if set(taxonomy[i]) == set(list({"data"})):
list_nodes.append(i)
else:
list_nodes += get_list_nodes_unique(taxonomy[i], parent + "->" + i)
return list_nodes
def count_list_nodes(taxonomy):
"""
Count list nodes and return sum.
:param taxonomy: taxonomy
:return: int
"""
count = 0
keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in keys:
if set(taxonomy[i]) == set(list({"data"})):
if i == keys[-1]:
count += 1
return count
else:
count += 1
else:
count += count_list_nodes(taxonomy[i])
return count
def get_list_nodes_ids_labels(taxonomy, parent=""):
    """
    Get taxonomy and return tuples of all list nodes.
    :param taxonomy: taxonomy
    :param parent: label (class) assigned to the list nodes of the current subtree
    :return: list of tuples (id, class)
    """
    if len(list(taxonomy.keys())) > 1 or list(taxonomy.keys()) == ["viruses"]:
        temp = []
        for k in [x for x in list(taxonomy.keys()) if x != "data"]:
            temp += get_list_nodes_ids_labels(taxonomy[k], k)
        return temp
    return [(x, parent) for x in taxonomy["data"]]
# ************ ALL NODES OPERATIONS ************ #
def count_examples(taxonomy):
"""
Get taxonomy, count examples in every node and return sum.
:param taxonomy: taxonomy
:return: sum of examples
"""
count = 0
keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in keys:
if set(taxonomy[i]) == set(list({"data"})):
if i == keys[-1]:
count += len(taxonomy[i]["data"])
return count
else:
count += len(taxonomy[i]["data"])
else:
count += count_examples(taxonomy[i])
return count
def get_all_nodes(taxonomy, parent=""):
"""
Get taxonomy and return all nodes (including list nodes).
:param parent: parent of current node - default ""
:param taxonomy: taxonomy
:return: all nodes
"""
all_nodes = list()
keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in keys:
        # if we want all non-list nodes, then this stays, otherwise comment this
# if len([x for x in taxonomy[i].keys() if x != "data"]) == 0:
# continue
if i == "rest":
all_nodes.append(parent + "->" + i)
else:
all_nodes.append(i)
all_nodes += get_all_nodes(taxonomy[i], i)
return all_nodes
# ************ OTHER ************ #
def load_seqs_from_ncbi(seq_len=100, skip_read=0, overlap=50, taxonomy_el_count=-1):
"""
Load sequences from NCBI database.
    Prepare sequences sliced into reads of length seq_len. Consecutive reads overlap by `overlap`
    nucleotides, and `skip_read` read positions are skipped between kept reads, so read starts are
    (seq_len - overlap) * (1 + skip_read) nucleotides apart (with seq_len=100 and skip_read=0,
    overlap=50 means half of each read is shared with the next one).
    If seq_len is -1, load whole sequences (do not slice them) - usually used with the FASTA format,
    as the sequences are sliced later.
:param seq_len: read length
:param skip_read: number of skipped reads
:param overlap: overlapping nucleotides count
:param taxonomy_el_count: how many elements we want in taxonomy; -1 means whole taxonomy
:return: dictionary reads - each genome ID key contains list of reads for specific genome,
dictionary taxonomy - each genome ID key contains taxonomy for specific genome
"""
data, _ = run(taxonomy_el_count)
print("getting sequences...")
seqs, tax = load_oid_seq_classification(data)
reads = defaultdict(list)
if seq_len != -1:
for oid, seq in seqs.items():
while seq:
if len(seq) < seq_len:
# we don't want shorter sequences than seq_len (shorter than 100)
break
reads[oid].append(seq[:seq_len])
seq = seq[seq_len - overlap + ((seq_len - overlap) * skip_read):]
else:
reads = seqs
return reads, tax
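# Illustrative sketch (not part of the original module): the read-slicing arithmetic
# used above, applied to a toy sequence. Consecutive read starts are
# (seq_len - overlap) * (1 + skip_read) nucleotides apart.
def _example_read_slicing(seq="ACGT" * 30, seq_len=20, overlap=10, skip_read=0):
    reads = []
    while seq:
        if len(seq) < seq_len:
            break  # drop the tail that is shorter than one read
        reads.append(seq[:seq_len])
        seq = seq[seq_len - overlap + ((seq_len - overlap) * skip_read):]
    print("%d reads, step %d" % (len(reads), (seq_len - overlap) * (1 + skip_read)))
    return reads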
def run(taxonomy_el_count=-1):
"""
Build taxonomy and get list ids and labels.
:param taxonomy_el_count: how many elements we want in taxonomy; -1 means whole taxonomy
:return: data, label
"""
taxonomy = get_taxonomy(get_gids(), count=taxonomy_el_count)
# remove_lists(taxonomy)
print_nice(taxonomy)
remove_small_nodes(taxonomy, 100)
print_nice(taxonomy)
list_nodes = get_list_nodes_ids_labels(taxonomy)
data, labels = list(zip(*list_nodes))
# for label in labels:
# print label
label_number = -1
temp_l = []
label_n = []
for l in labels:
if l not in temp_l:
temp_l.append(l)
label_number += 1
label_n.append(label_number)
return data, label_n
if __name__ == "__main__":
# a = load_seqs_from_ncbi(taxonomy_el_count=20)
temp_taxonomy = get_taxonomy(get_gids())
print("no of examples after taxonomy was built: %d" % count_examples(temp_taxonomy))
print("no of list nodes after taxonomy was built: %d" % count_list_nodes(temp_taxonomy))
print_nice(temp_taxonomy)
remove_small_nodes(temp_taxonomy, 100)
# remove_lists(temp_taxonomy)
print_nice(temp_taxonomy)
run()
|
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to perform basic operations with the
Google Cloud Vision API.
Example Usage:
python detect.py text ./resources/wakeupcat.jpg
python detect.py labels ./resources/landmark.jpg
python detect.py web ./resources/landmark.jpg
python detect.py web-uri http://wheresgus.com/dog.JPG
python detect.py web-geo ./resources/city.jpg
python detect.py faces-uri gs://your-bucket/file.jpg
python detect.py ocr-uri gs://python-docs-samples-tests/HodgeConj.pdf \
gs://BUCKET_NAME/PREFIX/
python detect.py object-localization ./resources/puppies.jpg
python detect.py object-localization-uri gs://...
For more information, see the documentation at
https://cloud.google.com/vision/docs.
"""
import argparse
# [START vision_face_detection]
def detect_faces(path):
"""Detects faces in an image."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_face_detection]
# [START vision_python_migration_image_file]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
# [END vision_python_migration_image_file]
response = client.face_detection(image=image)
faces = response.face_annotations
# Names of likelihood from google.cloud.vision.enums
likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
'LIKELY', 'VERY_LIKELY')
print('Faces:')
for face in faces:
print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in face.bounding_poly.vertices])
print('face bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_face_detection]
# [END vision_face_detection]
# [START vision_face_detection_gcs]
def detect_faces_uri(uri):
"""Detects faces in the file located in Google Cloud Storage or the web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_image_uri]
image = vision.Image()
image.source.image_uri = uri
# [END vision_python_migration_image_uri]
response = client.face_detection(image=image)
faces = response.face_annotations
# Names of likelihood from google.cloud.vision.enums
likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
'LIKELY', 'VERY_LIKELY')
print('Faces:')
for face in faces:
print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in face.bounding_poly.vertices])
print('face bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_face_detection_gcs]
# [START vision_label_detection]
def detect_labels(path):
"""Detects labels in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_label_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.label_detection(image=image)
labels = response.label_annotations
print('Labels:')
for label in labels:
print(label.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_label_detection]
# [END vision_label_detection]
# [START vision_label_detection_gcs]
def detect_labels_uri(uri):
"""Detects labels in the file located in Google Cloud Storage or on the
Web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.label_detection(image=image)
labels = response.label_annotations
print('Labels:')
for label in labels:
print(label.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_label_detection_gcs]
# [START vision_landmark_detection]
def detect_landmarks(path):
"""Detects landmarks in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_landmark_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.landmark_detection(image=image)
landmarks = response.landmark_annotations
print('Landmarks:')
for landmark in landmarks:
print(landmark.description)
for location in landmark.locations:
lat_lng = location.lat_lng
print('Latitude {}'.format(lat_lng.latitude))
print('Longitude {}'.format(lat_lng.longitude))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_landmark_detection]
# [END vision_landmark_detection]
# [START vision_landmark_detection_gcs]
def detect_landmarks_uri(uri):
"""Detects landmarks in the file located in Google Cloud Storage or on the
Web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.landmark_detection(image=image)
landmarks = response.landmark_annotations
print('Landmarks:')
for landmark in landmarks:
print(landmark.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_landmark_detection_gcs]
# [START vision_logo_detection]
def detect_logos(path):
"""Detects logos in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_logo_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.logo_detection(image=image)
logos = response.logo_annotations
print('Logos:')
for logo in logos:
print(logo.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_logo_detection]
# [END vision_logo_detection]
# [START vision_logo_detection_gcs]
def detect_logos_uri(uri):
"""Detects logos in the file located in Google Cloud Storage or on the Web.
"""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.logo_detection(image=image)
logos = response.logo_annotations
print('Logos:')
for logo in logos:
print(logo.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_logo_detection_gcs]
# [START vision_safe_search_detection]
def detect_safe_search(path):
"""Detects unsafe features in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_safe_search_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.safe_search_detection(image=image)
safe = response.safe_search_annotation
# Names of likelihood from google.cloud.vision.enums
likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
'LIKELY', 'VERY_LIKELY')
print('Safe search:')
print('adult: {}'.format(likelihood_name[safe.adult]))
print('medical: {}'.format(likelihood_name[safe.medical]))
print('spoofed: {}'.format(likelihood_name[safe.spoof]))
print('violence: {}'.format(likelihood_name[safe.violence]))
print('racy: {}'.format(likelihood_name[safe.racy]))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_safe_search_detection]
# [END vision_safe_search_detection]
# [START vision_safe_search_detection_gcs]
def detect_safe_search_uri(uri):
"""Detects unsafe features in the file located in Google Cloud Storage or
on the Web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.safe_search_detection(image=image)
safe = response.safe_search_annotation
# Names of likelihood from google.cloud.vision.enums
likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
'LIKELY', 'VERY_LIKELY')
print('Safe search:')
print('adult: {}'.format(likelihood_name[safe.adult]))
print('medical: {}'.format(likelihood_name[safe.medical]))
print('spoofed: {}'.format(likelihood_name[safe.spoof]))
print('violence: {}'.format(likelihood_name[safe.violence]))
print('racy: {}'.format(likelihood_name[safe.racy]))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_safe_search_detection_gcs]
# [START vision_text_detection]
def detect_text(path):
"""Detects text in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_text_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
print('Texts:')
for text in texts:
print('\n"{}"'.format(text.description))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in text.bounding_poly.vertices])
print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_text_detection]
# [END vision_text_detection]
# [START vision_text_detection_gcs]
def detect_text_uri(uri):
"""Detects text in the file located in Google Cloud Storage or on the Web.
"""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.text_detection(image=image)
texts = response.text_annotations
print('Texts:')
for text in texts:
print('\n"{}"'.format(text.description))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in text.bounding_poly.vertices])
print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_text_detection_gcs]
# [START vision_image_property_detection]
def detect_properties(path):
"""Detects image properties in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_image_properties]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.image_properties(image=image)
props = response.image_properties_annotation
print('Properties:')
for color in props.dominant_colors.colors:
print('fraction: {}'.format(color.pixel_fraction))
print('\tr: {}'.format(color.color.red))
print('\tg: {}'.format(color.color.green))
print('\tb: {}'.format(color.color.blue))
print('\ta: {}'.format(color.color.alpha))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_image_properties]
# [END vision_image_property_detection]
# [START vision_image_property_detection_gcs]
def detect_properties_uri(uri):
"""Detects image properties in the file located in Google Cloud Storage or
on the Web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.image_properties(image=image)
props = response.image_properties_annotation
print('Properties:')
for color in props.dominant_colors.colors:
print('frac: {}'.format(color.pixel_fraction))
print('\tr: {}'.format(color.color.red))
print('\tg: {}'.format(color.color.green))
print('\tb: {}'.format(color.color.blue))
print('\ta: {}'.format(color.color.alpha))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_image_property_detection_gcs]
# [START vision_web_detection]
def detect_web(path):
"""Detects web annotations given an image."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_web_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.web_detection(image=image)
annotations = response.web_detection
if annotations.best_guess_labels:
for label in annotations.best_guess_labels:
print('\nBest guess label: {}'.format(label.label))
if annotations.pages_with_matching_images:
print('\n{} Pages with matching images found:'.format(
len(annotations.pages_with_matching_images)))
for page in annotations.pages_with_matching_images:
print('\n\tPage url : {}'.format(page.url))
if page.full_matching_images:
print('\t{} Full Matches found: '.format(
len(page.full_matching_images)))
for image in page.full_matching_images:
print('\t\tImage url : {}'.format(image.url))
if page.partial_matching_images:
print('\t{} Partial Matches found: '.format(
len(page.partial_matching_images)))
for image in page.partial_matching_images:
print('\t\tImage url : {}'.format(image.url))
if annotations.web_entities:
print('\n{} Web entities found: '.format(
len(annotations.web_entities)))
for entity in annotations.web_entities:
print('\n\tScore : {}'.format(entity.score))
print(u'\tDescription: {}'.format(entity.description))
if annotations.visually_similar_images:
print('\n{} visually similar images found:\n'.format(
len(annotations.visually_similar_images)))
for image in annotations.visually_similar_images:
print('\tImage url : {}'.format(image.url))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_web_detection]
# [END vision_web_detection]
# [START vision_web_detection_gcs]
def detect_web_uri(uri):
"""Detects web annotations in the file located in Google Cloud Storage."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.web_detection(image=image)
annotations = response.web_detection
if annotations.best_guess_labels:
for label in annotations.best_guess_labels:
print('\nBest guess label: {}'.format(label.label))
if annotations.pages_with_matching_images:
print('\n{} Pages with matching images found:'.format(
len(annotations.pages_with_matching_images)))
for page in annotations.pages_with_matching_images:
print('\n\tPage url : {}'.format(page.url))
if page.full_matching_images:
print('\t{} Full Matches found: '.format(
len(page.full_matching_images)))
for image in page.full_matching_images:
print('\t\tImage url : {}'.format(image.url))
if page.partial_matching_images:
print('\t{} Partial Matches found: '.format(
len(page.partial_matching_images)))
for image in page.partial_matching_images:
print('\t\tImage url : {}'.format(image.url))
if annotations.web_entities:
print('\n{} Web entities found: '.format(
len(annotations.web_entities)))
for entity in annotations.web_entities:
print('\n\tScore : {}'.format(entity.score))
print(u'\tDescription: {}'.format(entity.description))
if annotations.visually_similar_images:
print('\n{} visually similar images found:\n'.format(
len(annotations.visually_similar_images)))
for image in annotations.visually_similar_images:
print('\tImage url : {}'.format(image.url))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_web_detection_gcs]
# [START vision_web_detection_include_geo]
def web_entities_include_geo_results(path):
"""Detects web annotations given an image, using the geotag metadata
in the image to detect web entities."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
web_detection_params = vision.WebDetectionParams(
include_geo_results=True)
image_context = vision.ImageContext(
web_detection_params=web_detection_params)
response = client.web_detection(image=image, image_context=image_context)
for entity in response.web_detection.web_entities:
print('\n\tScore : {}'.format(entity.score))
print(u'\tDescription: {}'.format(entity.description))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_web_detection_include_geo]
# [START vision_web_detection_include_geo_gcs]
def web_entities_include_geo_results_uri(uri):
"""Detects web annotations given an image in the file located in
    Google Cloud Storage, using the geotag metadata in the image to
detect web entities."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
web_detection_params = vision.WebDetectionParams(
include_geo_results=True)
image_context = vision.ImageContext(
web_detection_params=web_detection_params)
response = client.web_detection(image=image, image_context=image_context)
for entity in response.web_detection.web_entities:
print('\n\tScore : {}'.format(entity.score))
print(u'\tDescription: {}'.format(entity.description))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_web_detection_include_geo_gcs]
# [START vision_crop_hint_detection]
def detect_crop_hints(path):
"""Detects crop hints in an image."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_crop_hints]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
crop_hints_params = vision.CropHintsParams(aspect_ratios=[1.77])
image_context = vision.ImageContext(
crop_hints_params=crop_hints_params)
response = client.crop_hints(image=image, image_context=image_context)
hints = response.crop_hints_annotation.crop_hints
for n, hint in enumerate(hints):
print('\nCrop Hint: {}'.format(n))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in hint.bounding_poly.vertices])
print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_crop_hints]
# [END vision_crop_hint_detection]
# [START vision_crop_hint_detection_gcs]
def detect_crop_hints_uri(uri):
"""Detects crop hints in the file located in Google Cloud Storage."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
crop_hints_params = vision.CropHintsParams(aspect_ratios=[1.77])
image_context = vision.ImageContext(
crop_hints_params=crop_hints_params)
response = client.crop_hints(image=image, image_context=image_context)
hints = response.crop_hints_annotation.crop_hints
for n, hint in enumerate(hints):
print('\nCrop Hint: {}'.format(n))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in hint.bounding_poly.vertices])
print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_crop_hint_detection_gcs]
# [START vision_fulltext_detection]
def detect_document(path):
"""Detects document features in an image."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_document_text_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.document_text_detection(image=image)
for page in response.full_text_annotation.pages:
for block in page.blocks:
print('\nBlock confidence: {}\n'.format(block.confidence))
for paragraph in block.paragraphs:
print('Paragraph confidence: {}'.format(
paragraph.confidence))
for word in paragraph.words:
word_text = ''.join([
symbol.text for symbol in word.symbols
])
print('Word text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print('\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_document_text_detection]
# [END vision_fulltext_detection]
# [START vision_fulltext_detection_gcs]
def detect_document_uri(uri):
"""Detects document features in the file located in Google Cloud
Storage."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.document_text_detection(image=image)
for page in response.full_text_annotation.pages:
for block in page.blocks:
print('\nBlock confidence: {}\n'.format(block.confidence))
for paragraph in block.paragraphs:
print('Paragraph confidence: {}'.format(
paragraph.confidence))
for word in paragraph.words:
word_text = ''.join([
symbol.text for symbol in word.symbols
])
print('Word text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print('\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_fulltext_detection_gcs]
# [START vision_text_detection_pdf_gcs]
def async_detect_document(gcs_source_uri, gcs_destination_uri):
"""OCR with PDF/TIFF as source files on GCS"""
import json
import re
from google.cloud import vision
from google.cloud import storage
# Supported mime_types are: 'application/pdf' and 'image/tiff'
mime_type = 'application/pdf'
# How many pages should be grouped into each json output file.
batch_size = 2
client = vision.ImageAnnotatorClient()
feature = vision.Feature(
type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION)
gcs_source = vision.GcsSource(uri=gcs_source_uri)
input_config = vision.InputConfig(
gcs_source=gcs_source, mime_type=mime_type)
gcs_destination = vision.GcsDestination(uri=gcs_destination_uri)
output_config = vision.OutputConfig(
gcs_destination=gcs_destination, batch_size=batch_size)
async_request = vision.AsyncAnnotateFileRequest(
features=[feature], input_config=input_config,
output_config=output_config)
operation = client.async_batch_annotate_files(
requests=[async_request])
print('Waiting for the operation to finish.')
operation.result(timeout=420)
# Once the request has completed and the output has been
# written to GCS, we can list all the output files.
storage_client = storage.Client()
match = re.match(r'gs://([^/]+)/(.+)', gcs_destination_uri)
bucket_name = match.group(1)
prefix = match.group(2)
bucket = storage_client.get_bucket(bucket_name)
# List objects with the given prefix, filtering out folders.
blob_list = [blob for blob in list(bucket.list_blobs(
prefix=prefix)) if not blob.name.endswith('/')]
print('Output files:')
for blob in blob_list:
print(blob.name)
# Process the first output file from GCS.
# Since we specified batch_size=2, the first response contains
# the first two pages of the input file.
output = blob_list[0]
json_string = output.download_as_string()
response = json.loads(json_string)
# The actual response for the first page of the input file.
first_page_response = response['responses'][0]
annotation = first_page_response['fullTextAnnotation']
# Here we print the full text from the first page.
# The response contains more information:
# annotation/pages/blocks/paragraphs/words/symbols
# including confidence scores and bounding boxes
print('Full text:\n')
print(annotation['text'])
# [END vision_text_detection_pdf_gcs]
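# Illustrative sketch (not part of the original sample): async_detect_document()
# above only prints the first output file. With batch_size=2 every JSON file under
# the destination prefix holds the responses for two pages, so recovering the whole
# document means iterating over all output blobs. The gcs_destination_uri argument
# is the same gs://BUCKET_NAME/PREFIX/ value passed to async_detect_document().
def read_all_async_output(gcs_destination_uri):
    import json
    import re
    from google.cloud import storage

    match = re.match(r'gs://([^/]+)/(.+)', gcs_destination_uri)
    bucket = storage.Client().get_bucket(match.group(1))
    blobs = [blob for blob in bucket.list_blobs(prefix=match.group(2))
             if not blob.name.endswith('/')]

    full_text = []
    for blob in blobs:
        response = json.loads(blob.download_as_string())
        for page_response in response['responses']:
            # Pages without detected text carry no fullTextAnnotation, hence .get()
            annotation = page_response.get('fullTextAnnotation', {})
            full_text.append(annotation.get('text', ''))
    return ''.join(full_text)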
# [START vision_localize_objects]
def localize_objects(path):
"""Localize objects in the local image.
Args:
path: The path to the local file.
"""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
with open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
objects = client.object_localization(
image=image).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))
for object_ in objects:
print('\n{} (confidence: {})'.format(object_.name, object_.score))
print('Normalized bounding polygon vertices: ')
for vertex in object_.bounding_poly.normalized_vertices:
print(' - ({}, {})'.format(vertex.x, vertex.y))
# [END vision_localize_objects]
# [START vision_localize_objects_gcs]
def localize_objects_uri(uri):
"""Localize objects in the image on Google Cloud Storage
Args:
uri: The path to the file in Google Cloud Storage (gs://...)
"""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
objects = client.object_localization(
image=image).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))
for object_ in objects:
print('\n{} (confidence: {})'.format(object_.name, object_.score))
print('Normalized bounding polygon vertices: ')
for vertex in object_.bounding_poly.normalized_vertices:
print(' - ({}, {})'.format(vertex.x, vertex.y))
# [END vision_localize_objects_gcs]
def run_local(args):
if args.command == 'faces':
detect_faces(args.path)
elif args.command == 'labels':
detect_labels(args.path)
elif args.command == 'landmarks':
detect_landmarks(args.path)
elif args.command == 'text':
detect_text(args.path)
elif args.command == 'logos':
detect_logos(args.path)
elif args.command == 'safe-search':
detect_safe_search(args.path)
elif args.command == 'properties':
detect_properties(args.path)
elif args.command == 'web':
detect_web(args.path)
elif args.command == 'crophints':
detect_crop_hints(args.path)
elif args.command == 'document':
detect_document(args.path)
elif args.command == 'web-geo':
web_entities_include_geo_results(args.path)
elif args.command == 'object-localization':
localize_objects(args.path)
def run_uri(args):
if args.command == 'text-uri':
detect_text_uri(args.uri)
elif args.command == 'faces-uri':
detect_faces_uri(args.uri)
elif args.command == 'labels-uri':
detect_labels_uri(args.uri)
elif args.command == 'landmarks-uri':
detect_landmarks_uri(args.uri)
elif args.command == 'logos-uri':
detect_logos_uri(args.uri)
elif args.command == 'safe-search-uri':
detect_safe_search_uri(args.uri)
elif args.command == 'properties-uri':
detect_properties_uri(args.uri)
elif args.command == 'web-uri':
detect_web_uri(args.uri)
elif args.command == 'crophints-uri':
detect_crop_hints_uri(args.uri)
elif args.command == 'document-uri':
detect_document_uri(args.uri)
elif args.command == 'web-geo-uri':
web_entities_include_geo_results_uri(args.uri)
elif args.command == 'ocr-uri':
async_detect_document(args.uri, args.destination_uri)
elif args.command == 'object-localization-uri':
localize_objects_uri(args.uri)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
detect_faces_parser = subparsers.add_parser(
'faces', help=detect_faces.__doc__)
detect_faces_parser.add_argument('path')
faces_file_parser = subparsers.add_parser(
'faces-uri', help=detect_faces_uri.__doc__)
faces_file_parser.add_argument('uri')
detect_labels_parser = subparsers.add_parser(
'labels', help=detect_labels.__doc__)
detect_labels_parser.add_argument('path')
labels_file_parser = subparsers.add_parser(
'labels-uri', help=detect_labels_uri.__doc__)
labels_file_parser.add_argument('uri')
detect_landmarks_parser = subparsers.add_parser(
'landmarks', help=detect_landmarks.__doc__)
detect_landmarks_parser.add_argument('path')
landmark_file_parser = subparsers.add_parser(
'landmarks-uri', help=detect_landmarks_uri.__doc__)
landmark_file_parser.add_argument('uri')
detect_text_parser = subparsers.add_parser(
'text', help=detect_text.__doc__)
detect_text_parser.add_argument('path')
text_file_parser = subparsers.add_parser(
'text-uri', help=detect_text_uri.__doc__)
text_file_parser.add_argument('uri')
detect_logos_parser = subparsers.add_parser(
'logos', help=detect_logos.__doc__)
detect_logos_parser.add_argument('path')
logos_file_parser = subparsers.add_parser(
'logos-uri', help=detect_logos_uri.__doc__)
logos_file_parser.add_argument('uri')
safe_search_parser = subparsers.add_parser(
'safe-search', help=detect_safe_search.__doc__)
safe_search_parser.add_argument('path')
safe_search_file_parser = subparsers.add_parser(
'safe-search-uri',
help=detect_safe_search_uri.__doc__)
safe_search_file_parser.add_argument('uri')
properties_parser = subparsers.add_parser(
'properties', help=detect_properties.__doc__)
properties_parser.add_argument('path')
properties_file_parser = subparsers.add_parser(
'properties-uri',
help=detect_properties_uri.__doc__)
properties_file_parser.add_argument('uri')
# 1.1 Vision features
web_parser = subparsers.add_parser(
'web', help=detect_web.__doc__)
web_parser.add_argument('path')
web_uri_parser = subparsers.add_parser(
'web-uri',
help=detect_web_uri.__doc__)
web_uri_parser.add_argument('uri')
web_geo_parser = subparsers.add_parser(
'web-geo', help=web_entities_include_geo_results.__doc__)
web_geo_parser.add_argument('path')
web_geo_uri_parser = subparsers.add_parser(
'web-geo-uri',
help=web_entities_include_geo_results_uri.__doc__)
web_geo_uri_parser.add_argument('uri')
crop_hints_parser = subparsers.add_parser(
'crophints', help=detect_crop_hints.__doc__)
crop_hints_parser.add_argument('path')
crop_hints_uri_parser = subparsers.add_parser(
'crophints-uri', help=detect_crop_hints_uri.__doc__)
crop_hints_uri_parser.add_argument('uri')
document_parser = subparsers.add_parser(
'document', help=detect_document.__doc__)
document_parser.add_argument('path')
document_uri_parser = subparsers.add_parser(
'document-uri', help=detect_document_uri.__doc__)
document_uri_parser.add_argument('uri')
ocr_uri_parser = subparsers.add_parser(
'ocr-uri', help=async_detect_document.__doc__)
ocr_uri_parser.add_argument('uri')
ocr_uri_parser.add_argument('destination_uri')
object_localization_parser = subparsers.add_parser(
'object-localization', help=localize_objects.__doc__)
object_localization_parser.add_argument('path')
object_localization_uri_parser = subparsers.add_parser(
'object-localization-uri', help=localize_objects_uri.__doc__)
object_localization_uri_parser.add_argument('uri')
args = parser.parse_args()
if 'uri' in args.command:
run_uri(args)
else:
run_local(args)
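# Usage sketch (not part of the original sample; file paths and bucket names are
# illustrative). Assuming this script is saved as detect.py and Google Cloud
# credentials are configured via GOOGLE_APPLICATION_CREDENTIALS:
#   python detect.py labels ./wakeupcat.jpg
#   python detect.py text-uri gs://my-bucket/sign.jpg
#   python detect.py ocr-uri gs://my-bucket/scan.pdf gs://my-bucket/ocr-output/
#   python detect.py object-localization ./shelf.jpg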
|
|
import time
import tempfile
import shutil
import unittest
import email.utils
from contextlib import contextmanager
from scrapy.http import Response, HtmlResponse, Request
from scrapy.spiders import Spider
from scrapy.settings import Settings
from scrapy.exceptions import IgnoreRequest
from scrapy.utils.test import get_crawler
from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware
class _BaseTest(unittest.TestCase):
storage_class = 'scrapy.extensions.httpcache.DbmCacheStorage'
policy_class = 'scrapy.extensions.httpcache.RFC2616Policy'
def setUp(self):
self.yesterday = email.utils.formatdate(time.time() - 86400)
self.today = email.utils.formatdate()
self.tomorrow = email.utils.formatdate(time.time() + 86400)
self.crawler = get_crawler(Spider)
self.spider = self.crawler._create_spider('example.com')
self.tmpdir = tempfile.mkdtemp()
self.request = Request('http://www.example.com',
headers={'User-Agent': 'test'})
self.response = Response('http://www.example.com',
headers={'Content-Type': 'text/html'},
body=b'test body',
status=202)
self.crawler.stats.open_spider(self.spider)
def tearDown(self):
self.crawler.stats.close_spider(self.spider, '')
shutil.rmtree(self.tmpdir)
def _get_settings(self, **new_settings):
settings = {
'HTTPCACHE_ENABLED': True,
'HTTPCACHE_DIR': self.tmpdir,
'HTTPCACHE_EXPIRATION_SECS': 1,
'HTTPCACHE_IGNORE_HTTP_CODES': [],
'HTTPCACHE_POLICY': self.policy_class,
'HTTPCACHE_STORAGE': self.storage_class,
}
settings.update(new_settings)
return Settings(settings)
@contextmanager
def _storage(self, **new_settings):
with self._middleware(**new_settings) as mw:
yield mw.storage
@contextmanager
def _policy(self, **new_settings):
with self._middleware(**new_settings) as mw:
yield mw.policy
@contextmanager
def _middleware(self, **new_settings):
settings = self._get_settings(**new_settings)
mw = HttpCacheMiddleware(settings, self.crawler.stats)
mw.spider_opened(self.spider)
try:
yield mw
finally:
mw.spider_closed(self.spider)
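# Sketch (not part of the original tests) of how the helpers above compose in
# the test methods below:
#   with self._middleware(HTTPCACHE_ALWAYS_STORE=True) as mw:
#       mw.process_response(self.request, self.response, self.spider)
#       cached = mw.process_request(self.request, self.spider)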
def assertEqualResponse(self, response1, response2):
self.assertEqual(response1.url, response2.url)
self.assertEqual(response1.status, response2.status)
self.assertEqual(response1.headers, response2.headers)
self.assertEqual(response1.body, response2.body)
def assertEqualRequest(self, request1, request2):
self.assertEqual(request1.url, request2.url)
self.assertEqual(request1.headers, request2.headers)
self.assertEqual(request1.body, request2.body)
def assertEqualRequestButWithCacheValidators(self, request1, request2):
self.assertEqual(request1.url, request2.url)
assert b'If-None-Match' not in request1.headers
assert b'If-Modified-Since' not in request1.headers
assert any(h in request2.headers for h in (b'If-None-Match', b'If-Modified-Since'))
self.assertEqual(request1.body, request2.body)
def test_dont_cache(self):
with self._middleware() as mw:
self.request.meta['dont_cache'] = True
mw.process_response(self.request, self.response, self.spider)
self.assertEqual(mw.storage.retrieve_response(self.spider, self.request), None)
with self._middleware() as mw:
self.request.meta['dont_cache'] = False
mw.process_response(self.request, self.response, self.spider)
if mw.policy.should_cache_response(self.response, self.request):
self.assertIsInstance(mw.storage.retrieve_response(self.spider, self.request), self.response.__class__)
class DefaultStorageTest(_BaseTest):
def test_storage(self):
with self._storage() as storage:
request2 = self.request.copy()
assert storage.retrieve_response(self.spider, request2) is None
storage.store_response(self.spider, self.request, self.response)
response2 = storage.retrieve_response(self.spider, request2)
assert isinstance(response2, HtmlResponse) # content-type header
self.assertEqualResponse(self.response, response2)
time.sleep(2) # wait for cache to expire
assert storage.retrieve_response(self.spider, request2) is None
def test_storage_never_expire(self):
with self._storage(HTTPCACHE_EXPIRATION_SECS=0) as storage:
assert storage.retrieve_response(self.spider, self.request) is None
storage.store_response(self.spider, self.request, self.response)
time.sleep(0.5) # give the chance to expire
assert storage.retrieve_response(self.spider, self.request)
class DbmStorageTest(DefaultStorageTest):
storage_class = 'scrapy.extensions.httpcache.DbmCacheStorage'
class DbmStorageWithCustomDbmModuleTest(DbmStorageTest):
dbm_module = 'tests.mocks.dummydbm'
def _get_settings(self, **new_settings):
new_settings.setdefault('HTTPCACHE_DBM_MODULE', self.dbm_module)
return super()._get_settings(**new_settings)
def test_custom_dbm_module_loaded(self):
# make sure our dbm module has been loaded
with self._storage() as storage:
self.assertEqual(storage.dbmodule.__name__, self.dbm_module)
class FilesystemStorageTest(DefaultStorageTest):
storage_class = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
class FilesystemStorageGzipTest(FilesystemStorageTest):
def _get_settings(self, **new_settings):
new_settings.setdefault('HTTPCACHE_GZIP', True)
return super()._get_settings(**new_settings)
class DummyPolicyTest(_BaseTest):
policy_class = 'scrapy.extensions.httpcache.DummyPolicy'
def test_middleware(self):
with self._middleware() as mw:
assert mw.process_request(self.request, self.spider) is None
mw.process_response(self.request, self.response, self.spider)
response = mw.process_request(self.request, self.spider)
assert isinstance(response, HtmlResponse)
self.assertEqualResponse(self.response, response)
assert 'cached' in response.flags
def test_different_request_response_urls(self):
with self._middleware() as mw:
req = Request('http://host.com/path')
res = Response('http://host2.net/test.html')
assert mw.process_request(req, self.spider) is None
mw.process_response(req, res, self.spider)
cached = mw.process_request(req, self.spider)
assert isinstance(cached, Response)
self.assertEqualResponse(res, cached)
assert 'cached' in cached.flags
def test_middleware_ignore_missing(self):
with self._middleware(HTTPCACHE_IGNORE_MISSING=True) as mw:
self.assertRaises(IgnoreRequest, mw.process_request, self.request, self.spider)
mw.process_response(self.request, self.response, self.spider)
response = mw.process_request(self.request, self.spider)
assert isinstance(response, HtmlResponse)
self.assertEqualResponse(self.response, response)
assert 'cached' in response.flags
def test_middleware_ignore_schemes(self):
# http responses are cached by default
req, res = Request('http://test.com/'), Response('http://test.com/')
with self._middleware() as mw:
assert mw.process_request(req, self.spider) is None
mw.process_response(req, res, self.spider)
cached = mw.process_request(req, self.spider)
assert isinstance(cached, Response), type(cached)
self.assertEqualResponse(res, cached)
assert 'cached' in cached.flags
# file response is not cached by default
req, res = Request('file:///tmp/t.txt'), Response('file:///tmp/t.txt')
with self._middleware() as mw:
assert mw.process_request(req, self.spider) is None
mw.process_response(req, res, self.spider)
assert mw.storage.retrieve_response(self.spider, req) is None
assert mw.process_request(req, self.spider) is None
# s3 scheme response is cached by default
req, res = Request('s3://bucket/key'), Response('http://bucket/key')
with self._middleware() as mw:
assert mw.process_request(req, self.spider) is None
mw.process_response(req, res, self.spider)
cached = mw.process_request(req, self.spider)
assert isinstance(cached, Response), type(cached)
self.assertEqualResponse(res, cached)
assert 'cached' in cached.flags
# ignore s3 scheme
req, res = Request('s3://bucket/key2'), Response('http://bucket/key2')
with self._middleware(HTTPCACHE_IGNORE_SCHEMES=['s3']) as mw:
assert mw.process_request(req, self.spider) is None
mw.process_response(req, res, self.spider)
assert mw.storage.retrieve_response(self.spider, req) is None
assert mw.process_request(req, self.spider) is None
def test_middleware_ignore_http_codes(self):
# test response is not cached
with self._middleware(HTTPCACHE_IGNORE_HTTP_CODES=[202]) as mw:
assert mw.process_request(self.request, self.spider) is None
mw.process_response(self.request, self.response, self.spider)
assert mw.storage.retrieve_response(self.spider, self.request) is None
assert mw.process_request(self.request, self.spider) is None
# test response is cached
with self._middleware(HTTPCACHE_IGNORE_HTTP_CODES=[203]) as mw:
mw.process_response(self.request, self.response, self.spider)
response = mw.process_request(self.request, self.spider)
assert isinstance(response, HtmlResponse)
self.assertEqualResponse(self.response, response)
assert 'cached' in response.flags
class RFC2616PolicyTest(DefaultStorageTest):
policy_class = 'scrapy.extensions.httpcache.RFC2616Policy'
def _process_requestresponse(self, mw, request, response):
result = None
try:
result = mw.process_request(request, self.spider)
if result:
assert isinstance(result, (Request, Response))
return result
else:
result = mw.process_response(request, response, self.spider)
assert isinstance(result, Response)
return result
except Exception:
print('Request', request)
print('Response', response)
print('Result', result)
raise
def test_request_cacheability(self):
res0 = Response(self.request.url, status=200,
headers={'Expires': self.tomorrow})
req0 = Request('http://example.com')
req1 = req0.replace(headers={'Cache-Control': 'no-store'})
req2 = req0.replace(headers={'Cache-Control': 'no-cache'})
with self._middleware() as mw:
# response for a request with no-store must not be cached
res1 = self._process_requestresponse(mw, req1, res0)
self.assertEqualResponse(res1, res0)
assert mw.storage.retrieve_response(self.spider, req1) is None
# Re-do request without no-store and expect it to be cached
res2 = self._process_requestresponse(mw, req0, res0)
assert 'cached' not in res2.flags
res3 = mw.process_request(req0, self.spider)
assert 'cached' in res3.flags
self.assertEqualResponse(res2, res3)
# request with no-cache directive must not return cached response
# but it allows new response to be stored
res0b = res0.replace(body=b'foo')
res4 = self._process_requestresponse(mw, req2, res0b)
self.assertEqualResponse(res4, res0b)
assert 'cached' not in res4.flags
res5 = self._process_requestresponse(mw, req0, None)
self.assertEqualResponse(res5, res0b)
assert 'cached' in res5.flags
def test_response_cacheability(self):
responses = [
# 304 is not cacheable no matter what the server sends
(False, 304, {}),
(False, 304, {'Last-Modified': self.yesterday}),
(False, 304, {'Expires': self.tomorrow}),
(False, 304, {'Etag': 'bar'}),
(False, 304, {'Cache-Control': 'max-age=3600'}),
# Always obey no-store cache control
(False, 200, {'Cache-Control': 'no-store'}),
(False, 200, {'Cache-Control': 'no-store, max-age=300'}), # invalid
(False, 200, {'Cache-Control': 'no-store', 'Expires': self.tomorrow}), # invalid
# Ignore responses missing expiration and/or validation headers
(False, 200, {}),
(False, 302, {}),
(False, 307, {}),
(False, 404, {}),
# Cache responses with expiration and/or validation headers
(True, 200, {'Last-Modified': self.yesterday}),
(True, 203, {'Last-Modified': self.yesterday}),
(True, 300, {'Last-Modified': self.yesterday}),
(True, 301, {'Last-Modified': self.yesterday}),
(True, 308, {'Last-Modified': self.yesterday}),
(True, 401, {'Last-Modified': self.yesterday}),
(True, 404, {'Cache-Control': 'public, max-age=600'}),
(True, 302, {'Expires': self.tomorrow}),
(True, 200, {'Etag': 'foo'}),
]
with self._middleware() as mw:
for idx, (shouldcache, status, headers) in enumerate(responses):
req0 = Request(f'http://example-{idx}.com')
res0 = Response(req0.url, status=status, headers=headers)
res1 = self._process_requestresponse(mw, req0, res0)
res304 = res0.replace(status=304)
res2 = self._process_requestresponse(mw, req0, res304 if shouldcache else res0)
self.assertEqualResponse(res1, res0)
self.assertEqualResponse(res2, res0)
resc = mw.storage.retrieve_response(self.spider, req0)
if shouldcache:
self.assertEqualResponse(resc, res1)
assert 'cached' in res2.flags and res2.status != 304
else:
self.assertFalse(resc)
assert 'cached' not in res2.flags
# cache unconditionally unless response contains no-store or is a 304
with self._middleware(HTTPCACHE_ALWAYS_STORE=True) as mw:
for idx, (_, status, headers) in enumerate(responses):
shouldcache = 'no-store' not in headers.get('Cache-Control', '') and status != 304
req0 = Request(f'http://example2-{idx}.com')
res0 = Response(req0.url, status=status, headers=headers)
res1 = self._process_requestresponse(mw, req0, res0)
res304 = res0.replace(status=304)
res2 = self._process_requestresponse(mw, req0, res304 if shouldcache else res0)
self.assertEqualResponse(res1, res0)
self.assertEqualResponse(res2, res0)
resc = mw.storage.retrieve_response(self.spider, req0)
if shouldcache:
self.assertEqualResponse(resc, res1)
assert 'cached' in res2.flags and res2.status != 304
else:
self.assertFalse(resc)
assert 'cached' not in res2.flags
def test_cached_and_fresh(self):
sampledata = [
(200, {'Date': self.yesterday, 'Expires': self.tomorrow}),
(200, {'Date': self.yesterday, 'Cache-Control': 'max-age=86405'}),
(200, {'Age': '299', 'Cache-Control': 'max-age=300'}),
# Obey max-age if present over any others
(200, {'Date': self.today,
'Age': '86405',
'Cache-Control': 'max-age=' + str(86400 * 3),
'Expires': self.yesterday,
'Last-Modified': self.yesterday,
}),
# obey Expires if max-age is not present
(200, {'Date': self.yesterday,
'Age': '86400',
'Cache-Control': 'public',
'Expires': self.tomorrow,
'Last-Modified': self.yesterday,
}),
# Default missing Date header to right now
(200, {'Expires': self.tomorrow}),
# Firefox - Expires if age is greater than 10% of (Date - Last-Modified)
(200, {'Date': self.today, 'Last-Modified': self.yesterday, 'Age': str(86400 / 10 - 1)}),
# Firefox - Set one year maxage to permanent redirects missing expiration info
(300, {}), (301, {}), (308, {}),
]
with self._middleware() as mw:
for idx, (status, headers) in enumerate(sampledata):
req0 = Request(f'http://example-{idx}.com')
res0 = Response(req0.url, status=status, headers=headers)
# cache fresh response
res1 = self._process_requestresponse(mw, req0, res0)
self.assertEqualResponse(res1, res0)
assert 'cached' not in res1.flags
# return fresh cached response without network interaction
res2 = self._process_requestresponse(mw, req0, None)
self.assertEqualResponse(res1, res2)
assert 'cached' in res2.flags
# validate cached response if request max-age set as 0
req1 = req0.replace(headers={'Cache-Control': 'max-age=0'})
res304 = res0.replace(status=304)
assert mw.process_request(req1, self.spider) is None
res3 = self._process_requestresponse(mw, req1, res304)
self.assertEqualResponse(res1, res3)
assert 'cached' in res3.flags
def test_cached_and_stale(self):
sampledata = [
(200, {'Date': self.today, 'Expires': self.yesterday}),
(200, {'Date': self.today, 'Expires': self.yesterday, 'Last-Modified': self.yesterday}),
(200, {'Expires': self.yesterday}),
(200, {'Expires': self.yesterday, 'ETag': 'foo'}),
(200, {'Expires': self.yesterday, 'Last-Modified': self.yesterday}),
(200, {'Expires': self.tomorrow, 'Age': '86405'}),
(200, {'Cache-Control': 'max-age=86400', 'Age': '86405'}),
# no-cache forces expiration, also revalidation if validators exist
(200, {'Cache-Control': 'no-cache'}),
(200, {'Cache-Control': 'no-cache', 'ETag': 'foo'}),
(200, {'Cache-Control': 'no-cache', 'Last-Modified': self.yesterday}),
(200, {'Cache-Control': 'no-cache,must-revalidate', 'Last-Modified': self.yesterday}),
(200, {'Cache-Control': 'must-revalidate', 'Expires': self.yesterday, 'Last-Modified': self.yesterday}),
(200, {'Cache-Control': 'max-age=86400,must-revalidate', 'Age': '86405'}),
]
with self._middleware() as mw:
for idx, (status, headers) in enumerate(sampledata):
req0 = Request(f'http://example-{idx}.com')
res0a = Response(req0.url, status=status, headers=headers)
# cache expired response
res1 = self._process_requestresponse(mw, req0, res0a)
self.assertEqualResponse(res1, res0a)
assert 'cached' not in res1.flags
# Same request but as cached response is stale a new response must
# be returned
res0b = res0a.replace(body=b'bar')
res2 = self._process_requestresponse(mw, req0, res0b)
self.assertEqualResponse(res2, res0b)
assert 'cached' not in res2.flags
cc = headers.get('Cache-Control', '')
# Previous response expired too, subsequent request to same
# resource must revalidate and succeed on 304 if validators
# are present
if 'ETag' in headers or 'Last-Modified' in headers:
res0c = res0b.replace(status=304)
res3 = self._process_requestresponse(mw, req0, res0c)
self.assertEqualResponse(res3, res0b)
assert 'cached' in res3.flags
# get cached response on server errors unless must-revalidate
# in cached response
res0d = res0b.replace(status=500)
res4 = self._process_requestresponse(mw, req0, res0d)
if 'must-revalidate' in cc:
assert 'cached' not in res4.flags
self.assertEqualResponse(res4, res0d)
else:
assert 'cached' in res4.flags
self.assertEqualResponse(res4, res0b)
# Requests with max-stale can fetch expired cached responses
# unless cached response has must-revalidate
req1 = req0.replace(headers={'Cache-Control': 'max-stale'})
res5 = self._process_requestresponse(mw, req1, res0b)
self.assertEqualResponse(res5, res0b)
if 'no-cache' in cc or 'must-revalidate' in cc:
assert 'cached' not in res5.flags
else:
assert 'cached' in res5.flags
def test_process_exception(self):
with self._middleware() as mw:
res0 = Response(self.request.url, headers={'Expires': self.yesterday})
req0 = Request(self.request.url)
self._process_requestresponse(mw, req0, res0)
for e in mw.DOWNLOAD_EXCEPTIONS:
# Simulate encountering an error on download attempts
assert mw.process_request(req0, self.spider) is None
res1 = mw.process_exception(req0, e('foo'), self.spider)
# Use cached response as recovery
assert 'cached' in res1.flags
self.assertEqualResponse(res0, res1)
# Do not use cached response for unhandled exceptions
mw.process_request(req0, self.spider)
assert mw.process_exception(req0, Exception('foo'), self.spider) is None
def test_ignore_response_cache_controls(self):
sampledata = [
(200, {'Date': self.yesterday, 'Expires': self.tomorrow}),
(200, {'Date': self.yesterday, 'Cache-Control': 'no-store,max-age=86405'}),
(200, {'Age': '299', 'Cache-Control': 'max-age=300,no-cache'}),
(300, {'Cache-Control': 'no-cache'}),
(200, {'Expires': self.tomorrow, 'Cache-Control': 'no-store'}),
]
with self._middleware(HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS=['no-cache', 'no-store']) as mw:
for idx, (status, headers) in enumerate(sampledata):
req0 = Request(f'http://example-{idx}.com')
res0 = Response(req0.url, status=status, headers=headers)
# cache fresh response
res1 = self._process_requestresponse(mw, req0, res0)
self.assertEqualResponse(res1, res0)
assert 'cached' not in res1.flags
# return fresh cached response without network interaction
res2 = self._process_requestresponse(mw, req0, None)
self.assertEqualResponse(res1, res2)
assert 'cached' in res2.flags
if __name__ == '__main__':
unittest.main()
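# Sketch (not part of the test suite): the settings exercised above map directly
# onto a Scrapy project's settings.py. A minimal configuration enabling the
# RFC2616 cache policy with DBM storage might look like:
#   HTTPCACHE_ENABLED = True
#   HTTPCACHE_DIR = 'httpcache'
#   HTTPCACHE_EXPIRATION_SECS = 0  # 0 means cached responses never expire
#   HTTPCACHE_POLICY = 'scrapy.extensions.httpcache.RFC2616Policy'
#   HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.DbmCacheStorage'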
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.http.servlet import (
RestServlet, parse_string, parse_integer, parse_boolean
)
from synapse.handlers.sync import SyncConfig
from synapse.types import StreamToken
from synapse.events.utils import (
serialize_event, format_event_for_client_v2_without_event_id,
)
from synapse.api.filtering import Filter
from ._base import client_v2_pattern
import logging
logger = logging.getLogger(__name__)
class SyncRestServlet(RestServlet):
"""
GET parameters::
timeout(int): How long to wait for new events in milliseconds.
limit(int): Maximum number of events per room to return.
gap(bool): Create gaps in the message history if limit is exceeded to
ensure that the client has the most recent messages. Defaults to
"true".
sort(str,str): tuple of sort key (e.g. "timeline") and direction
(e.g. "asc", "desc"). Defaults to "timeline,asc".
since(batch_token): Batch token when asking for incremental deltas.
set_presence(str): What state the device presence should be set to.
default is "online".
backfill(bool): Should the HS request message history from other
servers. This may take a long time, making it unsuitable for clients
expecting a prompt response. Defaults to "true".
filter(filter_id): A filter to apply to the events returned.
filter_*: Filter override parameters.
Response JSON::
{
"next_batch": // batch token for the next /sync
"private_user_data": // private events for this user.
"public_user_data": // public events for all users including the
// public events for this user.
"rooms": [{ // List of rooms with updates.
"room_id": // Id of the room being updated
"limited": // Was the per-room event limit exceeded?
"published": // Is the room published by our HS?
"event_map": // Map of EventID -> event JSON.
"events": { // The recent events in the room if gap is "true"
// otherwise the next events in the room.
"batch": [] // list of EventIDs in the "event_map".
"prev_batch": // back token for getting previous events.
}
"state": [] // list of EventIDs updating the current state to
// be what it should be at the end of the batch.
"ephemeral": []
}]
}
"""
PATTERN = client_v2_pattern("/sync$")
ALLOWED_SORT = set(["timeline,asc", "timeline,desc"])
ALLOWED_PRESENCE = set(["online", "offline", "idle"])
def __init__(self, hs):
super(SyncRestServlet, self).__init__()
self.auth = hs.get_auth()
self.sync_handler = hs.get_handlers().sync_handler
self.clock = hs.get_clock()
self.filtering = hs.get_filtering()
@defer.inlineCallbacks
def on_GET(self, request):
user, client = yield self.auth.get_user_by_req(request)
timeout = parse_integer(request, "timeout", default=0)
limit = parse_integer(request, "limit", required=True)
gap = parse_boolean(request, "gap", default=True)
sort = parse_string(
request, "sort", default="timeline,asc",
allowed_values=self.ALLOWED_SORT
)
since = parse_string(request, "since")
set_presence = parse_string(
request, "set_presence", default="online",
allowed_values=self.ALLOWED_PRESENCE
)
backfill = parse_boolean(request, "backfill", default=False)
filter_id = parse_string(request, "filter", default=None)
logger.info(
"/sync: user=%r, timeout=%r, limit=%r, gap=%r, sort=%r, since=%r,"
" set_presence=%r, backfill=%r, filter_id=%r" % (
user, timeout, limit, gap, sort, since, set_presence,
backfill, filter_id
)
)
# TODO(mjark): Load filter and apply overrides.
try:
filter = yield self.filtering.get_user_filter(
user.localpart, filter_id
)
except Exception:
filter = Filter({})
# filter = filter.apply_overrides(http_request)
# if filter.matches(event):
# # stuff
sync_config = SyncConfig(
user=user,
client_info=client,
gap=gap,
limit=limit,
sort=sort,
backfill=backfill,
filter=filter,
)
if since is not None:
since_token = StreamToken.from_string(since)
else:
since_token = None
sync_result = yield self.sync_handler.wait_for_sync_for_user(
sync_config, since_token=since_token, timeout=timeout
)
time_now = self.clock.time_msec()
response_content = {
"public_user_data": self.encode_user_data(
sync_result.public_user_data, filter, time_now
),
"private_user_data": self.encode_user_data(
sync_result.private_user_data, filter, time_now
),
"rooms": self.encode_rooms(
sync_result.rooms, filter, time_now, client.token_id
),
"next_batch": sync_result.next_batch.to_string(),
}
defer.returnValue((200, response_content))
def encode_user_data(self, events, filter, time_now):
return events
def encode_rooms(self, rooms, filter, time_now, token_id):
return [
self.encode_room(room, filter, time_now, token_id)
for room in rooms
]
@staticmethod
def encode_room(room, filter, time_now, token_id):
event_map = {}
state_events = filter.filter_room_state(room.state)
recent_events = filter.filter_room_events(room.events)
state_event_ids = []
recent_event_ids = []
for event in state_events:
# TODO(mjark): Respect formatting requirements in the filter.
event_map[event.event_id] = serialize_event(
event, time_now, token_id=token_id,
event_format=format_event_for_client_v2_without_event_id,
)
state_event_ids.append(event.event_id)
for event in recent_events:
# TODO(mjark): Respect formatting requirements in the filter.
event_map[event.event_id] = serialize_event(
event, time_now, token_id=token_id,
event_format=format_event_for_client_v2_without_event_id,
)
recent_event_ids.append(event.event_id)
result = {
"room_id": room.room_id,
"event_map": event_map,
"events": {
"batch": recent_event_ids,
"prev_batch": room.prev_batch.to_string(),
},
"state": state_event_ids,
"limited": room.limited,
"published": room.published,
"ephemeral": room.ephemeral,
}
return result
def register_servlets(hs, http_server):
SyncRestServlet(hs).register(http_server)
|
|
"""
tests.tests_reference
~~~~~~~~~~~~~~~~~~~~~
"""
from jsonspec.pointer import extract, stage
from . import TestMappingType, TestSequenceType
from jsonspec.pointer import RefError, DocumentPointer, Pointer
from jsonspec.pointer import exceptions as events
from . import TestCase
class TestPointer(TestCase):
document = {
'foo': ['bar', 'baz', {
'$ref': 'obj2#/sub'
}]
}
def test_simple(self):
assert 'baz' == extract(self.document, '/foo/1')
def test_with_reference(self):
with self.assertRaises(RefError):
assert 'quux' == extract(self.document, '/foo/2')
def test_bypass_reference(self):
assert 'obj2#/sub' == extract(self.document, '/foo/2/$ref',
bypass_ref=True)
def test_compare(self):
assert '/foo' == Pointer('/foo')
assert Pointer('/foo') != Pointer('/bar')
tokens = Pointer('//a~1b/c%d/e^f/g|h/i\\j/k\"l/ /m~0n').tokens
assert tokens == ['', 'a/b', 'c%d', 'e^f', 'g|h', 'i\\j', 'k\"l', ' ', 'm~n'] # noqa
def test_iteration(self):
obj = self.document
for token in Pointer('/foo/1'):
obj = token.extract(obj)
assert 'baz' == obj
def test_document(self):
dp = DocumentPointer('example.com#/foo')
assert not dp.is_inner()
assert dp.document == 'example.com'
assert dp.pointer == '/foo'
def test_inner_document(self):
dp = DocumentPointer('#/foo')
assert dp.is_inner()
assert dp.document == ''
assert dp.pointer == '/foo'
class TestSequence(TestCase):
document = ['foo', 'bar', {'$ref': 'baz'}]
collections_document = TestSequenceType(['foo', 'bar', TestMappingType({'$ref': 'baz'})])
def test_sequence(self):
assert 'bar' == extract(self.document, '/1')
def test_last_element(self):
try:
extract(self.document, '/-')
self.fail('last element needed')
except events.LastElement as event:
assert self.document == event.obj
def test_ref(self):
try:
extract(self.document, '/2')
self.fail('RefError expected')
except events.RefError as event:
assert self.document[2] == event.obj
def test_bypass_ref(self):
assert self.document[2] == extract(self.document, '/2',
bypass_ref=True)
def test_out_of_range(self):
try:
extract(self.document, '/3')
self.fail('OutOfRange expected')
except events.OutOfRange as event:
assert self.document == event.obj
def test_wrong_type(self):
try:
extract(self.document, '/foo')
self.fail('WrongType expected')
except events.WrongType as event:
assert self.document == event.obj
class TestSequenceType(TestCase):
document = TestSequenceType(['foo', 'bar', TestMappingType({'$ref': 'baz'})])
def test_sequence(self):
assert 'bar' == extract(self.document, '/1')
def test_last_element(self):
try:
extract(self.document, '/-')
self.fail('last element needed')
except events.LastElement as event:
assert self.document == event.obj
def test_ref(self):
try:
extract(self.document, '/2')
self.fail('RefError expected')
except events.RefError as event:
assert self.document[2] == event.obj
def test_bypass_ref(self):
assert self.document[2] == extract(self.document, '/2',
bypass_ref=True)
def test_out_of_range(self):
try:
extract(self.document, '/3')
self.fail('OutOfRange expected')
except events.OutOfRange as event:
assert self.document == event.obj
def test_wrong_type(self):
try:
extract(self.document, '/foo')
self.fail('WrongType expected')
except events.WrongType as event:
assert self.document == event.obj
class TestMapping(TestCase):
document = {'foo': 42, 'bar': {'$ref': 'baz'}, 4: True}
def test_mapping(self):
assert 42 == extract(self.document, '/foo')
def test_cast(self):
assert self.document[4] == extract(self.document, '/4')
def test_ref(self):
try:
extract(self.document, '/bar')
self.fail('RefError expected')
except events.RefError as event:
assert self.document['bar'] == event.obj
def test_bypass_ref(self):
assert self.document['bar'] == extract(self.document, '/bar',
bypass_ref=True)
def test_out_of_bound(self):
try:
extract(self.document, '/3')
self.fail('out of bound')
except events.OutOfBounds as event:
assert self.document == event.obj
try:
extract(self.document, '/quux')
self.fail('out of bound')
except events.OutOfBounds as event:
assert self.document == event.obj
class TestMappingType(TestCase):
document = TestMappingType({'foo': 42, 'bar': TestMappingType({'$ref': 'baz'}), 4: True})
def test_mapping(self):
assert 42 == extract(self.document, '/foo')
def test_cast(self):
assert self.document[4] == extract(self.document, '/4')
def test_ref(self):
try:
extract(self.document, '/bar')
self.fail('RefError expected')
except events.RefError as event:
assert self.document['bar'] == event.obj
def test_bypass_ref(self):
assert self.document['bar'] == extract(self.document, '/bar',
bypass_ref=True)
def test_out_of_bound(self):
try:
extract(self.document, '/3')
self.fail('out of bound')
except events.OutOfBounds as event:
assert self.document == event.obj
try:
extract(self.document, '/quux')
self.fail('out of bound')
except events.OutOfBounds as event:
assert self.document == event.obj
class TestRelative(object):
document = stage({
'foo': ['bar', 'baz'],
'highly': {
'nested': {
'objects': True
}
}
})
def test_relative_1(self):
baz_relative = extract(self.document, '/foo/1')
# staged
assert extract(baz_relative, '0') == 'baz'
assert extract(baz_relative, '1/0') == 'bar'
assert extract(baz_relative, '2/highly/nested/objects') == True
# keys
assert extract(baz_relative, '0#') == 1
assert extract(baz_relative, '1#') == 'foo'
# unstage
assert extract(baz_relative, '0').obj == 'baz'
assert extract(baz_relative, '1/0').obj == 'bar'
assert extract(baz_relative, '2/highly/nested/objects').obj == True
def test_relative_2(self):
nested_relative = extract(self.document, '/highly/nested')
assert extract(nested_relative, '0/objects') == True
assert extract(nested_relative, '1/nested/objects') == True
assert extract(nested_relative, '2/foo/0') == 'bar'
assert extract(nested_relative, '0#') == 'nested'
assert extract(nested_relative, '1#') == 'highly'
assert extract(nested_relative, '0/objects').obj == True
assert extract(nested_relative, '1/nested/objects').obj == True
assert extract(nested_relative, '2/foo/0').obj == 'bar'
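# Notes (sketch, not part of the original tests): JSON Pointer escapes '~1' -> '/'
# and '~0' -> '~', as exercised in TestPointer.test_compare, e.g.
#   Pointer('/a~1b/m~0n').tokens  # -> ['a/b', 'm~n']
# The relative pointers used by TestRelative read as '<levels-up>/<pointer>',
# or '<levels-up>#' to get the key or index at that ancestor level.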
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cudnn RNN operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
CUDNN_RNN_UNIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
CUDNN_RNN_BIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
CUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM
CUDNN_GRU = cudnn_rnn_ops.CUDNN_GRU
CUDNN_RNN_RELU = cudnn_rnn_ops.CUDNN_RNN_RELU
CUDNN_RNN_TANH = cudnn_rnn_ops.CUDNN_RNN_TANH
# Half for cell input, half for hidden states.
CUDNN_LSTM_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_LSTM_PARAMS_PER_LAYER
CUDNN_GRU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_GRU_PARAMS_PER_LAYER
CUDNN_RNN_TANH_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_TANH_PARAMS_PER_LAYER
CUDNN_RNN_RELU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_RELU_PARAMS_PER_LAYER
CUDNN_INPUT_LINEAR_MODE = cudnn_rnn_ops.CUDNN_INPUT_LINEAR_MODE
CUDNN_INPUT_SKIP_MODE = cudnn_rnn_ops.CUDNN_INPUT_SKIP_MODE
CUDNN_INPUT_AUTO_MODE = cudnn_rnn_ops.CUDNN_INPUT_AUTO_MODE
__all__ = ["CudnnLSTM", "CudnnGRU", "CudnnRNNTanh", "CudnnRNNRelu"]
class _CudnnRNN(base_layer.Layer):
# pylint:disable=line-too-long
"""Abstract class for RNN layers with Cudnn implementation.
Cudnn RNNs have two major differences from other platform-independent RNNs tf
provides:
* Cudnn LSTM and GRU are mathematically different from their tf counterparts.
(e.g. @{tf.contrib.rnn.LSTMBlockCell} and @{tf.nn.rnn_cell.GRUCell}).
* Cudnn-trained checkpoints are not directly compatible with tf RNNs:
* They use a single opaque parameter buffer for the entire (possibly)
multi-layer multi-directional RNN; Whereas tf RNN weights are per-cell and
layer.
* The size and layout of the parameter buffers may change between
CUDA/CuDNN/GPU generations. Because of that, the opaque parameter variable
does not have a static shape and is not partitionable. Instead of using
partitioning to alleviate the PS's traffic load, try building a
multi-tower model and do gradient aggregation locally within the host
before updating the PS. See https://www.tensorflow.org/performance/performance_models#parameter_server_variables
for a detailed performance guide.
Consequently, if one plans to use Cudnn trained models on both GPU and CPU
for inference and training, one needs to:
* Create a CudnnOpaqueParamsSaveable subclass object to save RNN params in
canonical format. (This is done for you automatically during layer building
process.)
* When not using a Cudnn RNN class, use CudnnCompatibleRNN classes to load the
checkpoints. These classes are platform-independent and perform the same
computation as Cudnn for training and inference.
Similarly, CudnnCompatibleRNN-trained checkpoints can be loaded by CudnnRNN
classes seamlessly.
Below is a typical workflow (using LSTM as an example):
# Use Cudnn-trained checkpoints with CudnnCompatibleRNNs
```python
with tf.Graph().as_default():
lstm = CudnnLSTM(num_layers, num_units, direction, ...)
outputs, output_states = lstm(inputs, initial_states, training=True)
# If user plans to delay calling the cell with inputs, one can do
# lstm.build(input_shape)
saver = Saver()
# training subgraph
...
# Once in a while save the model.
saver.save(save_path)
# Inference subgraph for unidirectional RNN on, e.g., CPU or mobile.
with tf.Graph().as_default():
single_cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTM(num_units)
# NOTE: Even if there's only one layer, the cell needs to be wrapped in
# MultiRNNCell.
cell = tf.nn.rnn_cell.MultiRNNCell(
[single_cell() for _ in range(num_layers)])
# Leave the scope arg unset.
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state, ...)
saver = Saver()
# Create session
sess = ...
# Restores
saver.restore(sess, save_path)
# Inference subgraph for bidirectional RNN
with tf.Graph().as_default():
single_cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTM(num_units)
cells_fw = [single_cell() for _ in range(num_layers)]
cells_bw = [single_cell() for _ in range(num_layers)]
# Leave the scope arg unset.
(outputs, output_state_fw,
output_state_bw) = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
cells_fw, cells_bw, inputs, ...)
saver = Saver()
# Create session
sess = ...
# Restores
saver.restore(sess, save_path)
```
"""
# pylint:enable=line-too-long
# TODO(allenl): Document object-based saving and checkpoint compatibility once
# it's implemented for more cuDNN Layers.
# The following are constants defined by subclasses.
# Type of RNN cell.
_rnn_mode = None
# Number of cell weights(or biases) per layer.
_num_params_per_layer = None
# Custom SaveableObject class for the CudnnRNN class.
_saveable_cls = None
def __init__(self,
num_layers,
num_units,
input_mode=CUDNN_INPUT_LINEAR_MODE,
direction=CUDNN_RNN_UNIDIRECTION,
dropout=0.,
seed=None,
dtype=dtypes.float32,
kernel_initializer=None,
bias_initializer=None,
name=None):
"""Creates a CudnnRNN model from model spec.
Args:
num_layers: the number of layers for the RNN model.
num_units: the number of units within the RNN model.
input_mode: indicate whether there is a linear projection between the
input and the actual computation before the first layer. It can be
'linear_input', 'skip_input' or 'auto_select'.
'linear_input' (default) always applies a linear projection of input
onto RNN hidden state. (standard RNN behavior).
'skip_input' is only allowed when input_size == num_units;
'auto_select' implies 'skip_input' when input_size == num_units;
otherwise, it implies 'linear_input'.
direction: the direction model that the model operates. Can be either
'unidirectional' or 'bidirectional'
dropout: dropout rate, a number between [0, 1]. Dropout is applied between
each layer (no dropout is applied for a model with a single layer).
When set to 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
for behavior.
dtype: tf.float16, tf.float32 or tf.float64
kernel_initializer: starting value to initialize the weight.
bias_initializer: starting value to initialize the bias
(default is all zeros).
name: VariableScope for the created subgraph; defaults to class name.
This only serves the default scope if later no scope is specified when
invoking __call__().
Raises:
ValueError: if direction is invalid. Or dtype is not supported.
"""
super(_CudnnRNN, self).__init__(dtype=dtype, name=name)
cudnn_rnn_ops.check_direction(direction)
cudnn_rnn_ops.check_input_mode(input_mode)
if dtype not in [dtypes.float16, dtypes.float32, dtypes.float64]:
raise ValueError(
"Only support float16, float32, float64, provided %s" % dtype)
# Layer self.dtype is type name, the original DType object is kept here.
self._plain_dtype = dtype
self._num_layers = num_layers
self._num_units = num_units
self._input_mode = input_mode
self._direction = direction
self._dropout = dropout
self._seed = seed
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
# Init input_size to None, which will be set after build().
self._input_size = None
self._saveable = None
@property
def num_layers(self):
return self._num_layers
@property
def num_units(self):
return self._num_units
@property
def input_mode(self):
"""Input mode of first layer.
Indicates whether there is a linear projection between the input and the
actual computation before the first layer. It can be
* 'linear_input': (default) always applies a linear projection of input
onto RNN hidden state. (standard RNN behavior)
* 'skip_input': only allowed when input_size == num_units.
* 'auto_select': implies 'skip_input' when input_size == num_units;
otherwise, it implies 'linear_input'.
Returns:
'linear_input', 'skip_input' or 'auto_select'.
"""
return self._input_mode
@property
def input_size(self):
if not self._input_size:
raise ValueError(
"\'input_size\' is unknown since layer has not been built.")
return self._input_size
@property
def rnn_mode(self):
"""Type of RNN cell used.
Returns:
`lstm`, `gru`, `rnn_relu` or `rnn_tanh`.
"""
return self._rnn_mode
@property
def direction(self):
"""Returns `unidirectional` or `bidirectional`."""
return self._direction
@property
def num_dirs(self):
return 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
@property
def saveable(self):
return self._saveable
@property
def canonical_weight_shapes(self):
"""Shapes of Cudnn canonical weight tensors."""
if not self._input_size:
raise RuntimeError(
"%s.canonical_weight_shapes invoked before input shape is known" %
type(self).__name__)
shapes = []
for i in range(self._num_layers):
shapes.extend(self._canonical_weight_shape(i))
return shapes
@property
def canonical_bias_shapes(self):
"""Shapes of Cudnn canonical bias tensors."""
return self._canonical_bias_shape(0) * self._num_layers
def _update_trainable_weights(self, getter, *args, **kwargs):
"""Custom getter for layer variables."""
# Add variables to layer's `(non_)trainable_weights` list(s).
variable = getter(*args, **kwargs)
trainable = kwargs.get("trainable", True)
if trainable and variable not in self._trainable_weights:
self._trainable_weights.append(variable)
elif not trainable and variable not in self._non_trainable_weights:
self._non_trainable_weights.append(variable)
return variable
def build(self, input_shape):
"""Create variables of the Cudnn RNN.
It can be called manually before `__call__()` or automatically through
`__call__()`. In the former case, subsequent `__call__()`s will skip
creating variables.
Args:
input_shape: network input tensor shape, a python list or a TensorShape
object with 3 dimensions.
Raises:
ValueError: if input_shape has wrong dimension or unknown 3rd dimension.
"""
if self.built:
return
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape.ndims != 3:
raise ValueError("Expecting input_shape with 3 dims, got %d" %
input_shape.ndims)
if input_shape[-1].value is None:
raise ValueError("The last dimension of the inputs to `CudnnRNN` "
"should be defined. Found `None`.")
self._input_size = input_shape[-1].value
self.input_spec = base_layer.InputSpec(ndim=3, axes={-1: self._input_size})
self._set_scope(None)
# Not using the base class `add_variable()` since it calls
# `tf.get_variable()` with a callable initializer, whereas here we pass a
# tensor. This difference is required for forward compatibility with
# Cudnn.
with vs.variable_scope(
self._scope,
reuse=self.built,
custom_getter=self._update_trainable_weights):
if self._kernel_initializer is None:
self._kernel_initializer = init_ops.glorot_uniform_initializer(
seed=self._seed, dtype=self._plain_dtype)
if self._bias_initializer is None:
self._bias_initializer = init_ops.constant_initializer(
0.0, dtype=self._plain_dtype)
weights = [
self._kernel_initializer(sp, dtype=self._plain_dtype)
for sp in self.canonical_weight_shapes
]
biases = [
self._bias_initializer(sp, dtype=self._plain_dtype)
for sp in self.canonical_bias_shapes
]
opaque_params_t = self._canonical_to_opaque(weights, biases)
if vs.get_variable_scope().partitioner is not None:
logging.warn(
"Partitioner is not supported for Cudnn RNN layer variables, using "
"it will create forward-compatibility issues with future "
"CUDA/CuDNN generations.")
# Initialize opaque params with a tensor.
self.kernel = vs.get_variable(
"opaque_kernel", dtype=self._plain_dtype,
initializer=opaque_params_t, validate_shape=False)
# Create saveable in the outer scope of the cudnn subgraph, such that
# alternative subgraph with platform-independent rnn cells can load the
# checkpoints directly.
if not (self.built or vs.get_variable_scope().reuse is True):
self._create_saveable()
self.built = True
def _gather_saveables_for_checkpoint(self):
raise NotImplementedError(
"This cell does not yet support object-based saving. File a feature "
"request if this limitation bothers you.")
def call(self, inputs, initial_state=None, training=True):
"""Runs the forward step for the RNN model.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
initial_state: a tuple of tensor(s) of shape
`[num_layers * num_dirs, batch_size, num_units]`. If not provided, use
zero initial states. The tuple size is 2 for LSTM and 1 for other RNNs.
training: whether this operation will be used in training or inference.
Returns:
output: a tensor of shape `[time_len, batch_size, num_dirs * num_units]`.
It is a `concat([fwd_output, bak_output], axis=2)`.
output_states: a tuple of tensor(s) of the same shape and structure as
`initial_state`.
Raises:
ValueError: initial_state is not a tuple.
"""
if initial_state is not None and not isinstance(initial_state, tuple):
raise ValueError("Invalid initial_state type: %s, expecting tuple.",
type(initial_state))
dtype = self.dtype
inputs = ops.convert_to_tensor(inputs, dtype=dtype)
batch_size = array_ops.shape(inputs)[1]
if initial_state is None:
initial_state = self._zero_state(batch_size)
if self._rnn_mode == CUDNN_LSTM:
h, c = initial_state # pylint:disable=unbalanced-tuple-unpacking,unpacking-non-sequence
else:
h, = initial_state # pylint:disable=unbalanced-tuple-unpacking,unpacking-non-sequence
h = ops.convert_to_tensor(h, dtype=dtype)
if self._rnn_mode == CUDNN_LSTM:
c = ops.convert_to_tensor(c, dtype=dtype)
else:
# For model that doesn't take input_c, replace with a dummy tensor.
c = array_ops.constant([], dtype=dtype)
outputs, (output_h, output_c) = self._forward(inputs, h, c, self.kernel,
training)
if self._rnn_mode == CUDNN_LSTM:
return outputs, (output_h, output_c)
else:
return outputs, (output_h,)
def state_shape(self, batch_size):
raise NotImplementedError
def _zero_state(self, batch_size):
res = []
for sp in self.state_shape(batch_size):
res.append(array_ops.zeros(sp, dtype=self.dtype))
return tuple(res)
def _canonical_weight_shape(self, layer):
"""Shapes of Cudnn canonical weight tensors for given layer."""
if layer < 0 or layer >= self._num_layers:
raise ValueError("\'layer\' is not valid, got %s, expecting [%d, %d]" %
(layer, 0, self._num_layers-1))
if not self._input_size:
raise RuntimeError(
"%s._canonical_weight_shape invoked before input shape is known" %
type(self).__name__)
input_size = self._input_size
num_units = self._num_units
num_gates = self._num_params_per_layer // 2
is_bidi = self._direction == CUDNN_RNN_BIDIRECTION
if layer == 0:
wts_applied_on_inputs = [(num_units, input_size)] * num_gates
else:
if is_bidi:
wts_applied_on_inputs = [(num_units, 2 * num_units)] * num_gates
else:
wts_applied_on_inputs = [(num_units, num_units)] * num_gates
wts_applied_on_hidden_states = [(num_units, num_units)] * num_gates
tf_wts = wts_applied_on_inputs + wts_applied_on_hidden_states
return tf_wts if not is_bidi else tf_wts * 2
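# Worked example (illustrative, not part of the original file): for a
# unidirectional CudnnLSTM (_num_params_per_layer == 8, so num_gates == 4)
# with num_units=128 and input_size=32, layer 0 yields
# 4 * (128, 32) input weights followed by 4 * (128, 128) hidden-state weights.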
def _canonical_bias_shape(self, unused_layer):
"""Shapes of Cudnn canonical bias tensors for given layer."""
num_dirs = 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
return [[self._num_units]] * num_dirs * self._num_params_per_layer
def _canonical_to_opaque(self, cu_weights, cu_biases):
if not self._input_size:
raise RuntimeError(
"%s._canonical_to_opaque invoked before input shape is known" %
type(self).__name__)
with ops.device("/gpu:0"):
return cudnn_rnn_ops.cudnn_rnn_canonical_to_opaque_params(
rnn_mode=self._rnn_mode,
num_layers=self._num_layers,
num_units=self._num_units,
input_size=self._input_size,
weights=cu_weights,
biases=cu_biases,
input_mode=self._input_mode,
seed=self._seed,
dropout=self._dropout,
direction=self._direction)
def _forward(self, inputs, h, c, opaque_params, training):
output, output_h, output_c = cudnn_rnn_ops._cudnn_rnn( # pylint:disable=protected-access
inputs,
h,
c,
opaque_params,
training,
self._rnn_mode,
input_mode=self._input_mode,
direction=self._direction,
dropout=self._dropout,
seed=self._seed)
return output, (output_h, output_c)
def _create_saveable(self):
"""Create custom saveable for the Cudnn layer.
Called during layer building process to make sharing checkpoints between
Cudnn and Cudnn-compatible RNNs easy.
Returns:
a `CudnnOpaqueParamsSaveable` object.
Raises:
RuntimeError: if any custom saveable is already created for this layer.
"""
if self._saveable is not None:
raise RuntimeError("Cudnn saveable already created.")
self._saveable = self._saveable_cls( # pylint:disable=not-callable
opaque_params=self.trainable_variables[0],
num_layers=self.num_layers,
num_units=self.num_units,
input_size=self.input_size,
input_mode=self.input_mode,
direction=self.direction,
scope=vs.get_variable_scope(),
name="%s_saveable" % self.trainable_variables[0].name.split(":")[0])
self._saveable._add_checkpointable_dependencies( # pylint: disable=protected-access
checkpointable=self, dtype=self._plain_dtype)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
class CudnnLSTM(_CudnnRNN):
"""Cudnn implementation of LSTM layer."""
_rnn_mode = CUDNN_LSTM
_num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnLSTMSaveable
def state_shape(self, batch_size):
"""Shape of Cudnn LSTM states.
Shape is a 2-element tuple. Each is
[num_layers * num_dirs, batch_size, num_units]
Args:
batch_size: an int
Returns:
a tuple of python arrays.
"""
return ([self.num_layers * self.num_dirs, batch_size, self.num_units],
[self.num_layers * self.num_dirs, batch_size, self.num_units])
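# For example (illustrative): num_layers=2, unidirectional (num_dirs=1),
# batch_size=32, num_units=128 -> ([2, 32, 128], [2, 32, 128]) for (h, c).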
@property
def _gather_saveables_for_checkpoint(self):
if self._direction == CUDNN_RNN_UNIDIRECTION:
# Skip one inheritance level to avoid NotImplementedError.
return super(_CudnnRNN, self)._gather_saveables_for_checkpoint
else:
raise NotImplementedError(
"Object-based saving does not currently support bidirectional LSTM "
"cells. File a feature request if this limitation bothers you.")
class _CudnnRNNNoInputC(_CudnnRNN):
"""Abstract simple CudnnRNN layer without input_c."""
def state_shape(self, batch_size):
"""Shape of the state of Cudnn RNN cells w/o. input_c.
Shape is a 1-element tuple,
[num_layers * num_dirs, batch_size, num_units]
Args:
batch_size: an int
Returns:
a tuple of python arrays.
"""
return ([self.num_layers * self.num_dirs, batch_size, self.num_units],)
class CudnnGRU(_CudnnRNNNoInputC):
"""Cudnn implementation of the GRU layer."""
_rnn_mode = CUDNN_GRU
_num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnGRUSaveable
class CudnnRNNTanh(_CudnnRNNNoInputC):
"""Cudnn implementation of the RNN-tanh layer."""
_rnn_mode = CUDNN_RNN_TANH
_num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnRNNTanhSaveable
class CudnnRNNRelu(_CudnnRNNNoInputC):
"""Cudnn implementation of the RNN-relu layer."""
_rnn_mode = CUDNN_RNN_RELU
_num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnRNNReluSaveable
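# Minimal usage sketch (assumptions: a CUDA-enabled GPU and a float32 `inputs`
# tensor of shape [time_len, batch_size, input_size] already exist; the layer
# sizes below are illustrative):
#   lstm = CudnnLSTM(num_layers=2, num_units=128)
#   outputs, (h, c) = lstm(inputs, training=True)
#   gru = CudnnGRU(num_layers=1, num_units=64)
#   outputs, (h,) = gru(inputs, training=True)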
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""A RunConfig subclass with TPU support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import util as util_lib
# pylint: disable=protected-access
_TF_CONFIG_ENV = run_config_lib._TF_CONFIG_ENV
_SERVICE_KEY = run_config_lib._SERVICE_KEY
_TPU_WORKER_JOB_NAME = 'tpu_worker_job_name'
# pylint: enable=protected-access
class InputPipelineConfig(object):
r"""Please see the definition of these values in TPUConfig."""
PER_SHARD_V1 = 1
PER_HOST_V1 = 2
PER_HOST_V2 = 3
BROADCAST = 4
SLICED = 5
class TPUConfig(
collections.namedtuple('TPUConfig', [
'iterations_per_loop',
'num_shards',
'num_cores_per_replica',
'per_host_input_for_training',
'tpu_job_name',
'initial_infeed_sleep_secs',
'input_partition_dims',
'eval_training_input_configuration',
])):
r"""TPU related configuration required by `TPUEstimator`.
Args:
iterations_per_loop: This is the number of train steps running in TPU
system before returning to CPU host for each `Session.run`. This means
global step is increased `iterations_per_loop` times in one `Session.run`.
It is recommended to be set as number of global steps for next checkpoint.
Note that this value is not used in evaluation; instead, the total number of
eval `steps` is run on the TPU in a single `Session.run`.
num_shards: (Deprecated, ignored by TPUEstimator).
The number of model replicas in the system. For non-model-parallelism
case, this number equals the total number of TPU cores. For
model-parallelism, the total number of TPU cores equals
num_cores_per_replica * num_shards.
num_cores_per_replica: Defaults to `None`, which disables model parallelism.
An integer which describes the number of TPU cores per model replica. This
is required by model-parallelism which enables partitioning
the model to multiple cores. Currently num_cores_per_replica must be
1, 2, 4, 8, or 16.
per_host_input_for_training: If `True`, `PER_HOST_V1`, or `PER_HOST_V2`,
`input_fn` is invoked once on each host. With the per-core input pipeline
configuration, it is invoked once for each core.
With a global batch size `train_batch_size` in `TPUEstimator` constructor,
the batch size for each shard is `train_batch_size` // #hosts in the
`True` or `PER_HOST_V1` mode. In `PER_HOST_V2` mode, it is
`train_batch_size` // #cores. In `BROADCAST` mode, `input_fn` is only
invoked once on host 0 and the tensors are broadcasted to all other
replicas. The batch size equals `train_batch_size`. With the per-core
input pipeline configuration, the shard batch size is also
`train_batch_size` // #cores.
Note: per_host_input_for_training==PER_SHARD_V1 only supports mode.TRAIN.
tpu_job_name: The name of the TPU job. Typically, this name is auto-inferred
within TPUEstimator, however when using ClusterSpec propagation in more
esoteric cluster configurations, you may need to specify the job name as a
string.
initial_infeed_sleep_secs: The number of seconds the infeed thread should
wait before enqueueing the first batch. This helps avoid timeouts for
models that require a long compilation time.
input_partition_dims: A nested list to describe the partition dims
for all the tensors from input_fn(). The structure of
input_partition_dims must match the structure of `features` and
`labels` from input_fn(). The total number of partitions must match
`num_cores_per_replica`. For example, if input_fn() returns two tensors:
images with shape [N, H, W, C] and labels [N].
input_partition_dims = [[1, 2, 2, 1], None] will split the images to 4
pieces and feed into 4 TPU cores. labels tensor are directly broadcasted
to all the TPU cores since the partition dims is `None`.
Current limitations: This feature is only supported with the PER_HOST_V2
input mode.
eval_training_input_configuration: If `SLICED`, `input_fn` is only
invoked once on host 0 and the tensors are broadcasted to all other
replicas. Unlike per_host_input_for_training=BROADCAST, each replica will
only get a slice of the data instead of a whole copy. If `PER_HOST_V1`,
the behaviour is determined by per_host_input_for_training.
Raises:
ValueError: If `num_cores_per_replica` is not 1, 2, 4, 8 or 16.
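  Example:
    A minimal illustrative sketch (the values below are placeholders rather
    than recommendations):
      config = TPUConfig(
          iterations_per_loop=100,
          num_cores_per_replica=4,
          per_host_input_for_training=InputPipelineConfig.PER_HOST_V2,
          input_partition_dims=[[1, 2, 2, 1], None])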
"""
def __new__(
cls,
iterations_per_loop=2,
num_shards=None,
num_cores_per_replica=None,
per_host_input_for_training=True,
tpu_job_name=None,
initial_infeed_sleep_secs=None,
input_partition_dims=None,
eval_training_input_configuration=InputPipelineConfig.PER_HOST_V1):
# Check iterations_per_loop.
util_lib.check_positive_integer(iterations_per_loop,
'TPUConfig iterations_per_loop')
# Check num_shards.
if num_shards is not None:
util_lib.check_positive_integer(num_shards, 'TPUConfig num_shards')
if input_partition_dims is not None:
if len(input_partition_dims) != 1 and len(input_partition_dims) != 2:
raise ValueError(
'input_partition_dims must be a list/tuple with one or two'
' elements.')
if per_host_input_for_training is not InputPipelineConfig.PER_HOST_V2:
raise ValueError(
'input_partition_dims is only supported in PER_HOST_V2 mode.')
if num_cores_per_replica is None:
raise ValueError(
'input_partition_dims requires setting num_cores_per_replica.')
# Check num_cores_per_replica
if num_cores_per_replica is not None:
if num_cores_per_replica not in [1, 2, 4, 8, 16]:
raise ValueError(
'num_cores_per_replica must be 1, 2, 4, 8, or 16; got {}'.format(
str(num_cores_per_replica)))
if eval_training_input_configuration not in [
InputPipelineConfig.PER_HOST_V1, InputPipelineConfig.SLICED
]:
raise ValueError(
'eval_training_input_configuration must be PER_HOST_V1 or SLICED;'
' got {}'.format(str(eval_training_input_configuration)))
    # per_host_input_for_training may be a legacy bool (True/False) or one of
    # the InputPipelineConfig values. Map the legacy booleans to their numeric
    # equivalents.
if per_host_input_for_training is False:
per_host_input_for_training = InputPipelineConfig.PER_SHARD_V1
elif per_host_input_for_training is True:
per_host_input_for_training = InputPipelineConfig.PER_HOST_V1
# Check initial_infeed_sleep_secs.
if initial_infeed_sleep_secs:
util_lib.check_positive_integer(initial_infeed_sleep_secs,
'TPUConfig initial_infeed_sleep_secs')
tpu_job_name = tpu_job_name or _get_tpu_job_name_from_tf_config()
return super(TPUConfig, cls).__new__(
cls,
iterations_per_loop=iterations_per_loop,
num_shards=num_shards,
num_cores_per_replica=num_cores_per_replica,
per_host_input_for_training=per_host_input_for_training,
tpu_job_name=tpu_job_name,
initial_infeed_sleep_secs=initial_infeed_sleep_secs,
input_partition_dims=input_partition_dims,
eval_training_input_configuration=eval_training_input_configuration)
class RunConfig(run_config_lib.RunConfig):
"""RunConfig with TPU support."""
def __init__(self,
tpu_config=None,
evaluation_master=None,
master=None,
cluster=None,
**kwargs):
"""Constructs a RunConfig.
Args:
tpu_config: the TPUConfig that specifies TPU-specific configuration.
evaluation_master: a string. The address of the master to use for eval.
Defaults to master if not set.
master: a string. The address of the master to use for training.
cluster: a ClusterResolver
**kwargs: keyword config parameters.
Raises:
ValueError: if cluster is not None and the provided session_config has a
cluster_def already.
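    Example:
      An illustrative sketch (the master address below is a placeholder):
        config = RunConfig(
            master='grpc://203.0.113.1:8470',
            tpu_config=TPUConfig(iterations_per_loop=100))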
"""
super(RunConfig, self).__init__(**kwargs)
self._tpu_config = tpu_config or TPUConfig()
self._cluster = cluster
    # If the user sets master and/or evaluation_master explicitly (including
    # the empty string ''), use those values. Otherwise, take the values set
    # by the parent class.
if master is not None:
if cluster is not None:
raise ValueError('Both master and cluster are set.')
self._master = master
else:
if cluster:
self._master = cluster.master()
if evaluation_master is not None:
self._evaluation_master = evaluation_master
elif (not self._evaluation_master and
self.task_type != run_config_lib.TaskType.EVALUATOR):
# If the task type is EVALUATOR, it means some cluster manager sets the
# TF_CONFIG. In that case, we respect the configuration in TF_CONFIG.
#
      # Otherwise, it means the user executes the code without an external
      # cluster manager. For that, we optimize the user experience by setting
      # evaluation_master to master, unless the user overwrites it.
self._evaluation_master = self._master
# Set the ClusterSpec to use
if cluster:
self._cluster_spec = cluster.cluster_spec()
# Merge the cluster_def into the ConfigProto.
if self._session_config is None: # pylint: disable=access-member-before-definition
self._session_config = config_pb2.ConfigProto(
allow_soft_placement=True, isolate_session_state=True)
if self._session_config.HasField('cluster_def'):
raise ValueError(
'You cannot provide a ClusterResolver and '
'session_config.cluster_def.')
if self._cluster_spec:
self._session_config.cluster_def.CopyFrom(
self._cluster_spec.as_cluster_def())
def _maybe_overwrite_session_config_for_distributed_training(self):
    # Overrides the parent class session_config overwrite for between-graph
    # replication. TPU runs with in-graph replication, which should not have a
    # device filter. Doing nothing ("pass") effectively disables the overwrite.
pass
@property
def evaluation_master(self):
return self._evaluation_master
@property
def master(self):
return self._master
@property
def tpu_config(self):
return self._tpu_config
@property
def cluster(self):
return self._cluster
def replace(self, **kwargs):
if 'tpu_config' not in kwargs:
return super(RunConfig, self).replace(**kwargs)
tpu_config = kwargs.pop('tpu_config')
new_instance = super(RunConfig, self).replace(**kwargs)
new_instance._tpu_config = tpu_config # pylint: disable=protected-access
return new_instance
def _get_tpu_job_name_from_tf_config():
"""Extracts the TPU job name from TF_CONFIG env variable."""
  # TODO(xiejw): Extend this to support both the TF_CONFIG env variable and
  # cluster spec propagation.
tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV, '{}'))
tpu_job_name = tf_config.get(_SERVICE_KEY, {}).get(_TPU_WORKER_JOB_NAME)
if tpu_job_name:
logging.info('Load TPU job name from TF_CONFIG: %s', tpu_job_name)
return tpu_job_name
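# Illustrative sketch (not part of this module's API): the TPU worker job name
# can be supplied through the TF_CONFIG environment variable, for example
#
#   os.environ[_TF_CONFIG_ENV] = json.dumps(
#       {_SERVICE_KEY: {_TPU_WORKER_JOB_NAME: 'my_tpu_worker'}})
#
# after which _get_tpu_job_name_from_tf_config() returns 'my_tpu_worker'.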
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.version import *
from pykickstart.commands import *
# This map is keyed on kickstart syntax version as provided by
# pykickstart.version. Within each sub-dict is a mapping from command name
# to the class that handles it. This is a many-to-one mapping - that is,
# multiple command names can map to the same class. However, the Handler will
# ensure that only one instance of each class ever exists. (An illustrative
# lookup sketch follows the map below.)
commandMap = {
FC3: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC3_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC3_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"lilo": bootloader.FC3_Bootloader,
"lilocheck": lilocheck.FC3_LiloCheck,
"logvol": logvol.FC3_LogVol,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.FC3_Network,
"nfs": method.FC3_Method,
"part": partition.FC3_Partition,
"partition": partition.FC3_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC3_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc3
FC4: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.FC4_Network,
"nfs": method.FC3_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC4_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc4
FC5: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC5_LangSupport,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.FC4_Network,
"nfs": method.FC3_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC5_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc5
FC6: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC5_LangSupport,
"logging": logging.FC6_Logging,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"mouse": mouse.FC3_Mouse,
"multipath": multipath.FC6_MultiPath,
"network": network.FC6_Network,
"nfs": method.FC6_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.FC5_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.FC6_Repo,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"user": user.FC6_User,
"url": method.FC6_Method,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc6
F7: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.FC6_Network,
"nfs": method.FC6_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F7_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.FC6_Repo,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.FC6_User,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f7
F8: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F8_Network,
"nfs": method.FC6_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F7_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F8_Repo,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f8
F9: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.F9_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F9_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F9_Partition,
"partition": partition.F9_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F9_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F8_Repo,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F9_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f9
F10: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F9_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F9_Partition,
"partition": partition.F9_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F9_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F8_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f10
F11: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F9_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F11_Partition,
"partition": partition.F11_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F9_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F11_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f11
F12: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F12_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"fcoe": fcoe.F12_Fcoe,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F12_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F12_Partition,
"partition": partition.F12_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F12_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F11_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.FC6_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F12_ZFCP,
},
# based on f12
F13: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F12_Bootloader,
"cdrom": method.F13_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F13_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F12_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.F13_Method,
"part": partition.F12_Partition,
"partition": partition.F12_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F13_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F13_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F13_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F12_ZFCP,
},
# based on f13
F14: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F14_Bootloader,
"cdrom": method.F14_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F14_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F14_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F14_Method,
"ignoredisk": ignoredisk.F14_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.F14_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F14_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.F14_Method,
"part": partition.F14_Partition,
"partition": partition.F14_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F14_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F14_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F14_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F14_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F14_ZFCP,
},
# based on f14
F15: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F15_Bootloader,
"cdrom": method.F14_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F14_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F14_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F14_Method,
"ignoredisk": ignoredisk.F14_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F15_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.F14_Method,
"part": partition.F14_Partition,
"partition": partition.F14_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F15_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F15_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F14_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F14_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F14_ZFCP,
},
# based on f15
F16: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F15_Bootloader,
"cdrom": method.F14_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F14_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F14_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F14_Method,
"ignoredisk": ignoredisk.F14_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F15_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F16_Network,
"nfs": method.F14_Method,
"part": partition.F14_Partition,
"partition": partition.F14_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F15_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F15_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F14_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F14_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F14_ZFCP,
},
# based on fc1
RHEL3: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC3_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC3_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"lilo": bootloader.FC3_Bootloader,
"lilocheck": lilocheck.FC3_LiloCheck,
"logvol": logvol.FC3_LogVol,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.RHEL3_Mouse,
"network": network.FC3_Network,
"nfs": method.FC3_Method,
"part": partition.FC3_Partition,
"partition": partition.FC3_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC3_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
},
# based on fc3
RHEL4: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC3_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"lilo": bootloader.FC3_Bootloader,
"lilocheck": lilocheck.FC3_LiloCheck,
"logvol": logvol.FC3_LogVol,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.RHEL4_Network,
"nfs": method.FC3_Method,
"part": partition.FC3_Partition,
"partition": partition.FC3_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC3_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc6
RHEL5: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.RHEL5_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"key": key.RHEL5_Key,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC5_LangSupport,
"logging": logging.FC6_Logging,
"logvol": logvol.RHEL5_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"mouse": mouse.FC3_Mouse,
"multipath": multipath.FC6_MultiPath,
"network": network.RHEL5_Network,
"nfs": method.FC6_Method,
"part": partition.RHEL5_Partition,
"partition": partition.RHEL5_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.RHEL5_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.FC6_Repo,
"rootpw": rootpw.FC3_RootPw,
"services": services.FC6_Services,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"user": user.FC6_User,
"url": method.FC6_Method,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f13ish
RHEL6: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.RHEL6_Bootloader,
"cdrom": method.RHEL6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.RHEL6_Method,
"ignoredisk": ignoredisk.RHEL6_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F12_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.RHEL6_Network,
"nfs": method.RHEL6_Method,
"part": partition.F12_Partition,
"partition": partition.F12_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F13_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.RHEL6_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.RHEL6_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F12_ZFCP,
}
}
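# Illustrative lookup sketch (a hypothetical helper, not part of pykickstart's
# API): resolve a kickstart command name to its handler class for a given
# syntax version. Several names (e.g. "part"/"partition" or
# "halt"/"poweroff"/"reboot"/"shutdown") intentionally resolve to the same
# class; the Handler ensures only one instance of each class is ever created.
def _example_command_class(version, command_name):
    return commandMap[version][command_name]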
# This map is keyed on kickstart syntax version as provided by
# pykickstart.version. Within each sub-dict is a mapping from a data object
# name to the class that provides it. This is a bijective mapping - that is,
# each name maps to exactly one data class and all data classes have a name.
# More than one instance of each class is allowed to exist, however.
dataMap = {
FC3: {
"DriverDiskData": driverdisk.FC3_DriverDiskData,
"LogVolData": logvol.FC3_LogVolData,
"NetworkData": network.FC3_NetworkData,
"PartData": partition.FC3_PartData,
"RaidData": raid.FC3_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
FC4: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"LogVolData": logvol.FC4_LogVolData,
"NetworkData": network.FC4_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.FC4_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
FC5: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"LogVolData": logvol.FC4_LogVolData,
"NetworkData": network.FC4_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.FC5_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
FC6: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.FC4_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.FC6_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.FC5_RaidData,
"RepoData": repo.FC6_RepoData,
"UserData": user.FC6_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F7: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.FC4_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.FC6_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.F7_RaidData,
"RepoData": repo.FC6_RepoData,
"UserData": user.FC6_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F8: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.FC4_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.F7_RaidData,
"RepoData": repo.F8_RepoData,
"UserData": user.F8_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F9: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.F9_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F9_PartData,
"RaidData": raid.F9_RaidData,
"RepoData": repo.F8_RepoData,
"UserData": user.F8_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F10: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F9_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F9_PartData,
"RaidData": raid.F9_RaidData,
"RepoData": repo.F8_RepoData,
"UserData": user.F8_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F11: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F9_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F11_PartData,
"RaidData": raid.F9_RaidData,
"RepoData": repo.F11_RepoData,
"UserData": user.F8_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F12: {
"DriverDiskData": driverdisk.F12_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F12_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F12_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F12_PartData,
"RaidData": raid.F12_RaidData,
"RepoData": repo.F11_RepoData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F12_ZFCPData,
},
F13: {
"DriverDiskData": driverdisk.F12_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F12_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F12_PartData,
"RaidData": raid.F13_RaidData,
"RepoData": repo.F13_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F12_ZFCPData,
},
F14: {
"DriverDiskData": driverdisk.F14_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F14_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F14_PartData,
"RaidData": raid.F14_RaidData,
"RepoData": repo.F14_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F14_ZFCPData,
},
F15: {
"DriverDiskData": driverdisk.F14_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F15_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F14_PartData,
"RaidData": raid.F15_RaidData,
"RepoData": repo.F15_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F14_ZFCPData,
},
F16: {
"DriverDiskData": driverdisk.F14_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F15_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F16_NetworkData,
"PartData": partition.F14_PartData,
"RaidData": raid.F15_RaidData,
"RepoData": repo.F15_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F14_ZFCPData,
},
RHEL3: {
"DriverDiskData": driverdisk.FC3_DriverDiskData,
"LogVolData": logvol.FC3_LogVolData,
"NetworkData": network.RHEL4_NetworkData,
"PartData": partition.FC3_PartData,
"RaidData": raid.FC3_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
RHEL4: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"LogVolData": logvol.FC3_LogVolData,
"NetworkData": network.RHEL4_NetworkData,
"PartData": partition.FC3_PartData,
"RaidData": raid.FC3_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
RHEL5: {
"DriverDiskData": driverdisk.F12_DriverDiskData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.RHEL5_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.FC6_NetworkData,
"PartData": partition.RHEL5_PartData,
"RaidData": raid.RHEL5_RaidData,
"RepoData": repo.FC6_RepoData,
"UserData": user.FC6_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
RHEL6: {
"DriverDiskData": driverdisk.F12_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F12_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.RHEL6_NetworkData,
"PartData": partition.F12_PartData,
"RaidData": raid.F13_RaidData,
"RepoData": repo.RHEL6_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F12_ZFCPData,
}
}
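# Illustrative sketch (a hypothetical helper, not part of pykickstart's API):
# unlike commandMap, dataMap is one-to-one, and a fresh data object is created
# each time one is needed (e.g. one NetworkData instance per "network" line).
def _example_new_data_object(version, data_name):
    return dataMap[version][data_name]()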
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_spun_description657_all_of
except ImportError:
bt_spun_description657_all_of = sys.modules[
"onshape_client.oas.models.bt_spun_description657_all_of"
]
try:
from onshape_client.oas.models import bt_surface_description1564
except ImportError:
bt_surface_description1564 = sys.modules[
"onshape_client.oas.models.bt_surface_description1564"
]
try:
from onshape_client.oas.models import bt_vector3d389
except ImportError:
bt_vector3d389 = sys.modules["onshape_client.oas.models.bt_vector3d389"]
class BTSpunDescription657(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("type",): {
"PLANE": "PLANE",
"CYLINDER": "CYLINDER",
"CONE": "CONE",
"SPHERE": "SPHERE",
"TORUS": "TORUS",
"SPUN": "SPUN",
"SWEEP": "SWEEP",
"OFFSET": "OFFSET",
"BLEND": "BLEND",
"BSURFACE": "BSURFACE",
"OTHER": "OTHER",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
        This must be a method so a model may have properties that are of
        type self; this ensures that we don't create a cyclic import.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"axis": (bt_vector3d389.BTVector3d389,), # noqa: E501
"bt_type": (str,), # noqa: E501
"origin": (bt_vector3d389.BTVector3d389,), # noqa: E501
"type": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"axis": "axis", # noqa: E501
"bt_type": "btType", # noqa: E501
"origin": "origin", # noqa: E501
"type": "type", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_spun_description657.BTSpunDescription657 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
axis (bt_vector3d389.BTVector3d389): [optional] # noqa: E501
bt_type (str): [optional] # noqa: E501
origin (bt_vector3d389.BTVector3d389): [optional] # noqa: E501
type (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
bt_spun_description657_all_of.BTSpunDescription657AllOf,
bt_surface_description1564.BTSurfaceDescription1564,
],
"oneOf": [],
}
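# Illustrative usage sketch (hand-written, not generated code; treat it as an
# assumption about the generated API rather than a reference). Keyword
# arguments are validated against the composed allOf schemas at construction
# time, e.g.
#
#   spun = BTSpunDescription657(type="SPUN")  # "SPUN" is in allowed_values
#   spun.type  # -> "SPUN"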
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests mixed precision works correctly with Keras layers and models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.util import nest
class AssertTypeLayer(base_layer.Layer):
"""A layer which asserts it's inputs are a certain type."""
def __init__(self, assert_type=None, **kwargs):
self._assert_type = assert_type
super(AssertTypeLayer, self).__init__(**kwargs)
def assert_input_types(self, inputs):
"""Asserts `inputs` are of the correct type. Should be called in call()."""
if self._assert_type:
inputs_flattened = nest.flatten(inputs)
for inp in inputs_flattened:
assert inp.dtype.base_dtype == self._assert_type, (
'Input tensor has type %s which does not match assert type %s' %
(inp.dtype.name, self._assert_type.name))
class AddLayer(AssertTypeLayer):
"""A layer which adds it's input to a scalar variable."""
def __init__(self, regularizer=None, use_operator=False, **kwargs):
"""Initializes the AddLayer.
Args:
regularizer: The regularizer on the scalar variable.
use_operator: If True, add using the + operator. If False, add using
tf.add.
**kwargs: Passed to AssertTypeLayer constructor.
"""
self._regularizer = regularizer
self._use_operator = use_operator
super(AddLayer, self).__init__(**kwargs)
def build(self, _):
self.v = self.add_weight('v', (), initializer='ones',
regularizer=self._regularizer)
self.built = True
def call(self, inputs):
self.assert_input_types(inputs)
assert inputs.dtype == self.v.dtype
return self._add(inputs, self.v)
def _add(self, x, y):
if self._use_operator:
return x + y
else:
return math_ops.add(x, y)
class AddLayerWithoutAutoCast(AddLayer):
"""Same as AddLayer, but does not use AutoCastVariables."""
def build(self, _):
dtype = self.dtype
if dtype in ('float16', 'bfloat16'):
dtype = 'float32'
self.v = self.add_weight('v', (), initializer='ones', dtype=dtype,
experimental_autocast=False,
regularizer=self._regularizer)
self.built = True
def call(self, inputs):
self.assert_input_types(inputs)
assert self.v.dtype in (dtypes.float32, dtypes.float64)
return self._add(inputs, math_ops.cast(self.v, inputs.dtype))
class IdentityRegularizer(regularizers.Regularizer):
def __call__(self, x):
assert x.dtype == dtypes.float32
return array_ops.identity(x)
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = distribution_strategy_context.get_strategy
def create_mirrored_strategy():
if context.num_gpus() >= 1:
return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0'])
else:
return mirrored_strategy.MirroredStrategy(['cpu:0'])
TESTCASES = ({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy
})
class KerasLayerTest(keras_parameterized.TestCase):
"""Test mixed precision with Keras layers."""
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_variables_in_float32(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
layer = AddLayer(assert_type=dtypes.float16)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.float16)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_with_non_autocast_variable(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
layer = AddLayerWithoutAutoCast(assert_type=dtypes.float16)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.float16)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_regularizer_runs_in_float32(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
# Test on AddLayer
layer = AddLayer(assert_type=dtypes.float16,
regularizer=IdentityRegularizer())
layer(x)
(regularizer_loss,) = layer.losses
self.assertEqual(regularizer_loss.dtype, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(regularizer_loss), 1.)
# Test on AddLayerWithoutAutoCast
layer = AddLayerWithoutAutoCast(assert_type=dtypes.float16,
regularizer=IdentityRegularizer())
layer(x)
(regularizer_loss,) = layer.losses
self.assertEqual(regularizer_loss.dtype, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(regularizer_loss), 1.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_passing_policy_to_layer(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
# Passing a Policy to 'dtype' sets the policy for that layer.
layer = AddLayer(assert_type=dtypes.float16,
dtype=policy.Policy('infer_float32_vars'))
# layer.dtype refers to the variable dtype
self.assertEqual(layer.dtype, dtypes.float32)
layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
with policy.policy_scope('infer_float32_vars'):
# Passing a Policy to dtype overrides the global Policy
layer = AddLayer(assert_type=dtypes.float16,
dtype=policy.Policy('infer'))
# layer dtype is not yet known
self.assertEqual(layer.dtype, None)
layer(x)
self.assertEqual(layer.v.dtype, dtypes.float16)
self.assertEqual(layer.dtype, dtypes.float16)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_gradient(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope() as strategy:
with policy.policy_scope('infer_float32_vars'):
layer = AddLayer(assert_type=dtypes.float16)
def run_fn():
with backprop.GradientTape() as tape:
y = layer(x)
# Divide by num_replicas_in_sync, as the effective total loss is the
# sum of each of the replica's losses.
y /= strategy.num_replicas_in_sync
# Learning rate is small enough that if applied to a float16 variable,
# the variable will not change. So this tests the learning rate is not
# applied to a float16 value, but instead the float32 variable.
opt = gradient_descent.SGD(2 ** -14)
grad = tape.gradient(y, layer.v)
return opt.apply_gradients([(grad, layer.v)])
op = strategy.experimental_run(run_fn)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
# The gradient with respective to the variable is 1. Since the
# variable is initialized with 1 and the learning rate is 2**-14, the
# new variable value should be: init_val - gradient * learning_rate,
# which is 1 - 1 * 2**-14
self.assertEqual(self.evaluate(layer.v), 1 - 2 ** -14)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_checkpointing_layer_weights(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
layer = AddLayer(assert_type=dtypes.float16)
layer.build(())
layer.set_weights([np.array(100.)])
self.assertEqual(self.evaluate(layer(x)), 101.)
checkpoint = trackable_utils.Checkpoint(layer=layer)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
layer.set_weights([np.array(200.)])
self.assertEqual(self.evaluate(layer(x)), 201.)
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertEqual(layer.get_weights(), [100.])
self.assertEqual(self.evaluate(layer(x)), 101.)
# TODO(reedwm): Allow layers to be saved without using mixed precision, and
# restored with mixed precision? Or vice versa?
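# --- Illustrative sketch (not part of the original test suite) ---
# A minimal, hedged example of the pattern the layer tests above exercise:
# under the 'infer_float32_vars' policy, a layer fed float16 inputs keeps its
# variable in float32 while computing in float16. It reuses AddLayer and the
# module-level imports defined above; it is a sketch, not an additional test.
def _example_infer_float32_vars():
  x = constant_op.constant([1.], dtype=dtypes.float16)
  with policy.policy_scope('infer_float32_vars'):
    layer = AddLayer(assert_type=dtypes.float16)
    y = layer(x)
  # Outside the layer's call, the variable reports its storage dtype (float32)
  # while the layer output stays in the compute dtype (float16).
  return layer.v.dtype, y.dtype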
class KerasModelTest(keras_parameterized.TestCase):
"""Test mixed precision with Keras models."""
def _is_strategy_supported(self, strategy_fn):
if (strategy_fn != default_strategy_fn and
testing_utils.should_run_eagerly()):
# Distribution strategies do not support running with `run_eagerly=True`
# in Keras Models.
return False
else:
return True
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'operator',
'strategy_fn': create_mirrored_strategy,
'use_operator': True
}, {
'testcase_name': 'regularizer',
'strategy_fn': create_mirrored_strategy,
'use_regularizer': True
}, {
'testcase_name': 'nocloning',
'strategy_fn': create_mirrored_strategy,
'cloning': False
})
def test_model(self, strategy_fn, use_operator=False, use_regularizer=False,
cloning=True):
if not self._is_strategy_supported(strategy_fn):
return
regularizer = IdentityRegularizer() if use_regularizer else None
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16)
layer = AddLayer(assert_type=dtypes.float16, use_operator=use_operator,
regularizer=regularizer)
y = layer(x)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return math_ops.reduce_mean(y_pred)
        # Learning rate is small enough that if applied to a float16 variable,
        # the variable will not change. So this tests that the learning rate is
        # not applied to a float16 value, but instead to the float32 variable.
opt = gradient_descent.SGD(2 ** -14)
model.compile(opt, loss=loss_fn, cloning=cloning,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 2 ** -14 subtracted
# from it.
expected = 1 - 2 ** -14
if use_regularizer:
# Regularizer adds another 2 ** -14 to the gradient.
expected -= 2 ** -14
self.assertEqual(backend.eval(layer.v), expected)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'nocloning',
'strategy_fn': create_mirrored_strategy,
'cloning': False,
})
def test_fixed_loss_scaling(self, strategy_fn, cloning=True):
# Note: We do not test mixed precision in this method, only loss scaling.
if not self._is_strategy_supported(strategy_fn):
return
loss_scale = 8.
batch_size = 4
with strategy_fn().scope():
x = layers.Input(shape=(1,), batch_size=batch_size)
layer = AddLayer()
y = layer(x)
# The gradient of 'y' at this point is 1. With loss scaling, the gradient
# is 'loss_scale'. We divide by the batch size since the loss is averaged
# across batch elements.
expected_gradient = loss_scale / batch_size
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn([expected_gradient]))
y = core.Lambda(identity_with_grad_check_fn)(y)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return math_ops.reduce_mean(y_pred)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(opt, loss=loss_fn, cloning=cloning,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 1 subtracted from it.
expected = 0
self.assertEqual(backend.eval(layer.v), expected)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'loss_scaling',
'strategy_fn': create_mirrored_strategy,
'use_loss_scaling': True
})
def test_advanced_model(self, strategy_fn, use_loss_scaling=False):
# The advanced model tests mixed-precision-related features that would occur
# in a resnet50 model. It tests a model that has:
    #  * Multiple layers, some of which use auto-cast variables and some of
    #    which do not
# * Regularization on some variables and not others.
# * A fixed loss scale (if use_loss_scaling is True)
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
if use_loss_scaling:
loss_scale = 8.
learning_rate = 2 ** -14
with strategy.scope():
with policy.policy_scope(policy.Policy('infer_float32_vars')):
x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16)
layer1 = AddLayer(assert_type=dtypes.float16,
regularizer=IdentityRegularizer(), use_operator=True)
layer2 = AddLayerWithoutAutoCast(assert_type=dtypes.float16,
use_operator=True)
layer3 = AddLayer(assert_type=dtypes.float16, use_operator=False)
layer4 = AddLayerWithoutAutoCast(assert_type=dtypes.float16,
regularizer=IdentityRegularizer(),
use_operator=False)
y = layer1(x)
y = layer2(y)
y = layer3(y)
y = layer4(y)
if use_loss_scaling:
# The gradient of 'y' at this point is 1. With loss scaling, the
# gradient is 'loss_scale'. We divide by the batch size of 2 since the
# loss is averaged across batch elements.
expected_gradient = loss_scale / 2
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=dtypes.float16,
expected_gradient=[expected_gradient]))
y = core.Lambda(identity_with_grad_check_fn)(y)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
self.assertEqual(y_true.dtype, dtypes.float32)
self.assertEqual(y_pred.dtype, dtypes.float32)
return math_ops.reduce_mean(y_pred)
opt = gradient_descent.SGD(learning_rate)
if use_loss_scaling:
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(opt, loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
for layer in (layer1, layer2, layer3, layer4):
if layer.losses:
# Layer has weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - 2 * learning_rate)
else:
# Layer does not have weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - learning_rate)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'nocloning',
'strategy_fn': create_mirrored_strategy,
'cloning': False,
})
def test_dynamic_loss_scaling(self, strategy_fn, cloning=True):
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
initial_loss_scale = 2.
batch_size = 4
expected_gradient = backend.variable([initial_loss_scale / batch_size],
dtype=dtypes.float16)
# If this variable is set to True, the model below will have NaN gradients
have_nan_gradients = backend.variable(False, dtype=dtypes.bool)
with strategy.scope():
with policy.policy_scope(policy.Policy('infer_float32_vars')):
x = layers.Input(shape=(1,), batch_size=batch_size,
dtype=dtypes.float16)
layer = AddLayer(assert_type=dtypes.float16)
y = layer(x)
identity_with_nan_grads = (
mp_test_util.create_identity_with_nan_gradients_fn(
have_nan_gradients))
y = core.Lambda(identity_with_nan_grads)(y)
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=dtypes.float16,
expected_gradient=expected_gradient))
y = core.Lambda(identity_with_grad_check_fn)(y)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return math_ops.reduce_mean(y_pred)
opt = gradient_descent.SGD(1.)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=initial_loss_scale, increment_period=2)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(opt, loss=loss_fn, cloning=cloning,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
    # The variable starts at 1 and has a gradient of 1, so will go down by 1
    # each step.
self.assertEqual(backend.eval(layer.v), 0)
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -1)
# There have been two steps without NaNs, so the loss scale will double
backend.set_value(expected_gradient,
backend.get_value(expected_gradient * 2))
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -2)
# Next test with NaN gradients.
backend.set_value(have_nan_gradients, True)
model.fit(dataset)
# Variable should not be updated
self.assertEqual(backend.eval(layer.v), -2)
# Test with finite gradients again
backend.set_value(have_nan_gradients, False)
# The loss scale will be halved due to the NaNs, so the gradient will also
# be halved
backend.set_value(expected_gradient,
backend.get_value(expected_gradient / 2))
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -3)
@parameterized.named_parameters({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'base_h5',
'strategy_fn': default_strategy_fn,
'h5': True,
}, {
'testcase_name': 'distribute_h5',
'strategy_fn': create_mirrored_strategy,
'h5': True,
})
@test_util.run_in_graph_and_eager_modes
def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False):
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16)
layer = AddLayer(assert_type=dtypes.float16)
y = layer(x)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
model.set_weights([np.array(100.)])
x = np.ones((2, 1), dtype=np.float16)
self.assertAllClose(backend.get_value(model(x)), x + 100.)
suffix = '.h5' if h5 else ''
weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix)
model.save_weights(weights_file)
model.set_weights([np.array(200.)])
self.assertAllClose(backend.get_value(model(x)), x + 200.)
model.load_weights(weights_file)
self.assertAllClose(backend.get_value(model(x)), x + 100.)
self.assertEqual(model.get_weights(), [np.array(100.)])
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(*TESTCASES)
def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn):
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and
not context.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
# Create and run model.
with strategy.scope():
x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float32)
y = AddLayer(assert_type=dtypes.float32)(x)
model = models.Model(inputs=x, outputs=y)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=1., increment_period=2., multiplier=2.)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(optimizer=opt, loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(loss_scale()), 2)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
# Save model weights.
save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(save_prefix)
# Run model again for 1 step (2 examples with a batch size of 2)
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
self.assertEqual(backend.get_value(loss_scale()), 4)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0)
# Load model weights and ensure loss scale weights are restored.
model.load_weights(save_prefix)
self.assertEqual(backend.get_value(loss_scale()), 2)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
if __name__ == '__main__':
test.main()
|
|
"""Database objects."""
import sqlalchemy
try:
    from collections.abc import Mapping
except ImportError:  # Python 2 compatibility
    from collections import Mapping
from sqlalchemy.engine.url import make_url
from sqlalchemy.event import listen
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm.session import sessionmaker
from .exceptions import DatabaseAlreadyExists, DatabaseNotFound
from .sessions import Session
from .util import import_string
DATABASE_ALIASES = {
'postgresql': 'sqlalchemy_multidb.databases.PostgresDatabase',
}
class DatabaseManager(object):
"""
Provides a container of databases.
:param scope_func: optional function which defines the current scope.
"""
def __init__(self, scope_func=None):
self._databases = {}
self._scope_func = scope_func
self.Model = declarative_base()
@property
def databases(self):
"""
Gets the databases.
:return: The list with all databases.
"""
return self._databases.values()
def config_from_object(self, config):
"""
Loads the databases from the config.
:param config: The object containing the database config.
"""
for key in ('SQLALCHEMY_DATABASES', 'DATABASES', 'databases'):
databases = self._get_databases_from_object(key, config)
if databases is not None:
for name, url in databases.items():
self.add_database(name, url)
break
def close(self):
"""
Closes all databases.
"""
for database in self._databases.values():
database.close()
self._databases.clear()
def add_database(self, name, url):
"""
Adds a new database from the url.
:param str name: The name of the database.
:param str url: The connection string.
"""
name = name or 'default'
if not isinstance(name, str):
raise TypeError('Parameter name should be a str.')
if not isinstance(url, str):
raise TypeError('Parameter url should be a str.')
if name in self._databases:
raise DatabaseAlreadyExists(name)
self._databases[name] = self._create_database(name, url)
def get_database(self, name=None):
"""
Gets a database by the name.
:param str name: The database name.
:return Database: The database object.
"""
name = name or 'default'
database = self._databases.get(name)
if database:
return database
raise DatabaseNotFound(name)
def remove_database(self, name=None):
"""
Removes a database by the name.
:param name: The database name.
"""
name = name or 'default'
database = self._databases.pop(name, None)
if not database:
raise DatabaseNotFound(name)
database.close()
def session(self, database_name=None):
"""
Gets a new session for the specified database.
:param str database_name: The database name.
:return: The new session.
"""
database_name = database_name or 'default'
database = self._databases.get(database_name)
if database:
return database.session()
raise DatabaseNotFound(database_name)
def scoped_session(self, database_name=None):
"""
Gets a new scoped session for the specified database.
:param str database_name: The database name.
:return: The new scoped session.
"""
database_name = database_name or 'default'
database = self._databases.get(database_name)
if database:
return database.scoped_session()
raise DatabaseNotFound(database_name)
def _create_database(self, name, url):
"""
Creates a new database from the url.
:param str name: The database name.
:param str url: The connection string.
:return Database: A new instance of `Database`.
"""
uri = make_url(url)
class_name = DATABASE_ALIASES.get(uri.drivername)
if class_name is None:
database_cls = Database
else:
database_cls = import_string(class_name)
return database_cls(name, url, scope_func=self._scope_func)
def _get_databases_from_object(self, key, config):
"""
        Get the databases from the given config object.
:param str key: The name of the attribute in the config object.
:param config: The config object.
:return dict: The map of databases.
"""
if isinstance(config, Mapping):
return config.get(key)
return getattr(config, key, None)
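# Illustrative usage sketch (an addition for documentation purposes, not part
# of the original module). It shows the intended flow: register a database by
# URL, fetch it by name, open a session, and tear everything down. The
# 'default' name and the in-memory sqlite URL are example values only.
def _example_database_manager_usage():
    manager = DatabaseManager()
    manager.add_database('default', 'sqlite://')
    database = manager.get_database('default')  # -> Database instance
    session = manager.session('default')        # -> new Session bound to it
    try:
        return database.name
    finally:
        session.close()
        manager.close()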
class Database(object):
"""
Provides methods to get sessions for a specific engine.
"""
def __init__(self, name, url, scope_func):
self._name = name
self._url, engine_params = self._parse_url(url)
self._engine = sqlalchemy.create_engine(self._url, **engine_params)
self._session_maker = sessionmaker(self.engine, class_=Session, expire_on_commit=False)
self._scoped_session_maker = scoped_session(self._session_maker, scopefunc=scope_func)
self.Model = declarative_base()
@property
def name(self):
"""
Gets the database name.
"""
return self._name
@property
def engine(self):
"""
Gets the database engine.
"""
return self._engine
@property
def session_maker(self):
"""
Gets the session maker.
"""
return self._session_maker
@property
def scoped_session_maker(self):
"""
Gets the scoped session maker.
"""
return self._scoped_session_maker
def close(self):
"""
        Closes the engine and all of its open sessions.
"""
self._session_maker.close_all()
self._session_maker = None
self._scoped_session_maker = None
self._engine.dispose()
self._engine = None
def session(self):
"""
Gets a new session for the specified database.
"""
return self._session_maker()
def scoped_session(self):
"""
Gets a scoped session for the specified database.
"""
return self._scoped_session_maker()
@staticmethod
def _parse_url(url):
"""
Gets the parameters from the url.
"""
params_keys = {
'case_sensitive': bool,
'convert_unicode': bool,
'echo': bool,
'echo_pool': bool,
'encoding': str,
'isolation_level': str,
'module': str,
'pool_reset_on_return': str,
'strategy': str,
'paramstyle': str,
'logging_name': str,
'pool_logging_name': str,
'max_overflow': int,
'pool_size': int,
'pool_recycle': int,
'pool_timeout': int,
'label_length': int,
}
uri = make_url(url)
kwargs = {'connect_args': {}}
for key, value in uri.query.items():
param_type = params_keys.get(key)
if param_type:
kwargs[key] = param_type(value)
else:
kwargs['connect_args'][key] = value
uri.query.clear()
return str(uri), kwargs
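# Hedged illustration (not part of the original module) of how _parse_url
# splits a connection string: recognized query parameters become
# create_engine() keyword arguments, anything else is forwarded through
# 'connect_args'. The URL below is an example value; parsing it does not
# require the PostgreSQL driver to be installed.
def _example_parse_url():
    base_url, kwargs = Database._parse_url(
        'postgresql://user:secret@localhost/app?echo=True&application_name=svc')
    # With the SQLAlchemy versions this module targets, 'echo' is recognized
    # and coerced to bool, while 'application_name' lands in connect_args.
    return base_url, kwargs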
class PostgresDatabase(Database):
"""
PostgreSQL implementation.
    Provides support for setting the PostgreSQL search_path from the url.
"""
def __init__(self, name, url, scope_func=None):
uri = make_url(url)
self.__search_path = uri.query.pop('search_path', None)
super(PostgresDatabase, self).__init__(name, str(uri), scope_func)
if self.__search_path:
listen(self.engine, 'checkout', self.__on_checkout)
def __on_checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a new connection is open."""
cursor = dbapi_connection.cursor()
cursor.execute('SET search_path TO ' + self.__search_path)
cursor.close()
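# Illustrative sketch (not part of the original module): a 'search_path' query
# parameter makes PostgresDatabase issue SET search_path on every connection
# checkout. Calling this requires a PostgreSQL DBAPI (e.g. psycopg2) to be
# importable; the URL and schema name are example values only.
def _example_postgres_search_path():
    database = PostgresDatabase(
        'reports',
        'postgresql://user:secret@localhost/app?search_path=reporting')
    try:
        return database.name
    finally:
        database.close()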
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.types import core
from tensorflow.python.types import internal
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Fallback in case fast_tensor_util is not properly compiled.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
return np.asarray(x, dtype=np.float16).view(np.uint16).item()
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.half_val.extend(
[ExtractBitsFromFloat16(x) for x in proto_values])
def _MediumAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
# TODO: Remove the conversion if cython supports np.float16_t
fast_tensor_util.AppendFloat16ArrayToTensorProto(
tensor_proto,
np.asarray(proto_values, dtype=np.float16).view(np.uint16))
def ExtractBitsFromBFloat16(x):
return np.asarray(
x, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16).item()
def SlowAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.half_val.extend(
[ExtractBitsFromBFloat16(x) for x in proto_values])
def FastAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
fast_tensor_util.AppendBFloat16ArrayToTensorProto(
tensor_proto, np.asarray(
proto_values, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16))
if _FAST_TENSOR_UTIL_AVAILABLE:
_NP_TO_APPEND_FN = {
dtypes.bfloat16.as_numpy_dtype:
FastAppendBFloat16ArrayToTensorProto,
np.float16:
_MediumAppendFloat16ArrayToTensorProto,
np.float32:
fast_tensor_util.AppendFloat32ArrayToTensorProto,
np.float64:
fast_tensor_util.AppendFloat64ArrayToTensorProto,
np.int32:
fast_tensor_util.AppendInt32ArrayToTensorProto,
np.int64:
fast_tensor_util.AppendInt64ArrayToTensorProto,
np.uint8:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
np.uint16:
fast_tensor_util.AppendUInt16ArrayToTensorProto,
np.uint32:
fast_tensor_util.AppendUInt32ArrayToTensorProto,
np.uint64:
fast_tensor_util.AppendUInt64ArrayToTensorProto,
np.int8:
fast_tensor_util.AppendInt8ArrayToTensorProto,
np.int16:
fast_tensor_util.AppendInt16ArrayToTensorProto,
np.complex64:
fast_tensor_util.AppendComplex64ArrayToTensorProto,
np.complex128:
fast_tensor_util.AppendComplex128ArrayToTensorProto,
np.object:
fast_tensor_util.AppendObjectArrayToTensorProto,
np.bool:
fast_tensor_util.AppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype:
fast_tensor_util.AppendInt8ArrayToTensorProto,
dtypes.quint8.as_numpy_dtype:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
dtypes.qint16.as_numpy_dtype:
fast_tensor_util.AppendInt16ArrayToTensorProto,
dtypes.quint16.as_numpy_dtype:
fast_tensor_util.AppendUInt16ArrayToTensorProto,
dtypes.qint32.as_numpy_dtype:
fast_tensor_util.AppendInt32ArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
else:
def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.float_val.extend([x.item() for x in proto_values])
def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.double_val.extend([x.item() for x in proto_values])
def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int_val.extend([x.item() for x in proto_values])
def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int64_val.extend([x.item() for x in proto_values])
def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int_val.extend([x.item()[0] for x in proto_values])
def SlowAppendUInt32ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.uint32_val.extend([x.item() for x in proto_values])
def SlowAppendUInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.uint64_val.extend([x.item() for x in proto_values])
def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.scomplex_val.extend(
[v.item() for x in proto_values for v in [x.real, x.imag]])
def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.dcomplex_val.extend(
[v.item() for x in proto_values for v in [x.real, x.imag]])
def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.bool_val.extend([x.item() for x in proto_values])
_NP_TO_APPEND_FN = {
dtypes.bfloat16.as_numpy_dtype: SlowAppendBFloat16ArrayToTensorProto,
np.float16: SlowAppendFloat16ArrayToTensorProto,
np.float32: SlowAppendFloat32ArrayToTensorProto,
np.float64: SlowAppendFloat64ArrayToTensorProto,
np.int32: SlowAppendIntArrayToTensorProto,
np.int64: SlowAppendInt64ArrayToTensorProto,
np.uint8: SlowAppendIntArrayToTensorProto,
np.uint16: SlowAppendIntArrayToTensorProto,
np.uint32: SlowAppendUInt32ArrayToTensorProto,
np.uint64: SlowAppendUInt64ArrayToTensorProto,
np.int8: SlowAppendIntArrayToTensorProto,
np.int16: SlowAppendIntArrayToTensorProto,
np.complex64: SlowAppendComplex64ArrayToTensorProto,
np.complex128: SlowAppendComplex128ArrayToTensorProto,
np.object: SlowAppendObjectArrayToTensorProto,
np.bool: SlowAppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
def GetFromNumpyDTypeDict(dtype_dict, dtype):
# NOTE: dtype_dict.get(dtype) always returns None.
for key, val in six.iteritems(dtype_dict):
if key == dtype:
return val
return None
def GetNumpyAppendFn(dtype):
  # numpy dtypes for strings are variable length. We cannot compare
  # dtype with a single constant (np.string does not exist) to decide whether
  # dtype is a "string" type. We need to compare dtype.type to be
  # sure it's a string type.
if dtype.type == np.string_ or dtype.type == np.unicode_:
if _FAST_TENSOR_UTIL_AVAILABLE:
return fast_tensor_util.AppendObjectArrayToTensorProto
else:
return SlowAppendObjectArrayToTensorProto
return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
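# Small illustrative sketch (not part of the original file) of how the append
# function is selected: string dtypes route to the object-array appender, and
# numeric dtypes are resolved through the equality scan in
# GetFromNumpyDTypeDict (see the NOTE above about why dict.get() is avoided).
def _ExampleGetNumpyAppendFn():
  string_fn = GetNumpyAppendFn(np.array(['a', 'b']).dtype)
  int32_fn = GetNumpyAppendFn(np.array([1, 2], dtype=np.int32).dtype)
  return string_fn, int32_fn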
def TensorShapeProtoToList(shape):
"""Convert a TensorShape to a list.
Args:
shape: A TensorShapeProto.
Returns:
List of integers representing the dimensions of the tensor.
"""
return [dim.size for dim in shape.dim]
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
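# Tiny illustrative example (not part of the original file): for a dense nested
# list, _GetDenseDimensions reports its shape by walking the first element at
# each nesting level, e.g. [[1, 2, 3], [4, 5, 6]] -> [2, 3].
def _ExampleGetDenseDimensions():
  return _GetDenseDimensions([[1, 2, 3], [4, 5, 6]])  # [2, 3]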
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, (list, tuple)):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
_TENSOR_CONTENT_TYPES = frozenset([
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8,
dtypes.int16, dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8,
dtypes.qint16, dtypes.quint16, dtypes.qint32, dtypes.uint32, dtypes.uint64
])
# pylint: disable=invalid-name
def _check_failed(v):
# NB. none of the _check_* functions could raise a ValueError, so
# it is safe to use here.
raise ValueError(v)
def _check_quantized(values):
# Cannot rely on `nest` because the leaves are tuples.
if not isinstance(values, (list, tuple)):
_check_failed(values)
if isinstance(values, tuple):
_ = [_check_int(v) for v in values]
else:
_ = [_check_quantized(v) for v in values]
def _generate_isinstance_check(expected_types):
def inner(values):
for v in nest.flatten(values):
if not (isinstance(v, expected_types) or
(isinstance(v, np.ndarray) and
issubclass(v.dtype.type, expected_types))):
_check_failed(v)
return inner
_check_int = _generate_isinstance_check(
(compat.integral_types, tensor_shape.Dimension))
_check_float = _generate_isinstance_check(compat.real_types)
_check_complex = _generate_isinstance_check(compat.complex_types)
_check_str = _generate_isinstance_check(compat.bytes_or_text_types)
_check_bool = _generate_isinstance_check(bool)
def _check_not_tensor(values):
_ = [_check_failed(v) for v in nest.flatten(values)
if isinstance(v, ops.Tensor)]
# pylint: enable=invalid-name
_TF_TO_IS_OK = {
dtypes.bool: _check_bool,
dtypes.complex128: _check_complex,
dtypes.complex64: _check_complex,
dtypes.float16: _check_float,
dtypes.float32: _check_float,
dtypes.float64: _check_float,
dtypes.int16: _check_int,
dtypes.int32: _check_int,
dtypes.int64: _check_int,
dtypes.int8: _check_int,
dtypes.qint16: _check_quantized,
dtypes.qint32: _check_quantized,
dtypes.qint8: _check_quantized,
dtypes.quint16: _check_quantized,
dtypes.quint8: _check_quantized,
dtypes.string: _check_str,
dtypes.uint16: _check_int,
dtypes.uint8: _check_int,
dtypes.uint32: _check_int,
dtypes.uint64: _check_int,
}
def _AssertCompatible(values, dtype):
if dtype is None:
fn = _check_not_tensor
else:
try:
fn = _TF_TO_IS_OK[dtype]
except KeyError:
# There isn't a specific fn, so we try to do the best possible.
if dtype.is_integer:
fn = _check_int
elif dtype.is_floating:
fn = _check_float
elif dtype.is_complex:
fn = _check_complex
elif dtype.is_quantized:
fn = _check_quantized
else:
fn = _check_not_tensor
try:
fn(values)
except ValueError as e:
[mismatch] = e.args
if dtype is None:
raise TypeError("Expected any non-tensor type, got a tensor instead.")
else:
raise TypeError("Expected %s, got %s of type '%s' instead." %
(dtype.name, repr(mismatch), type(mismatch).__name__))
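# Hedged sketch (not part of the original file) of _AssertCompatible's
# behaviour: compatible Python values pass silently, while a mismatch is
# re-raised as a TypeError naming the offending value.
def _ExampleAssertCompatible():
  _AssertCompatible([1, 2, 3], dtypes.int32)       # ints are fine for int32
  try:
    _AssertCompatible(['a', 'b'], dtypes.float32)  # strings are not floats
  except TypeError:
    return True
  return False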
def _is_array_like(obj): # pylint: disable=invalid-name
"""Check if a given object is array-like."""
if isinstance(obj, ops.Tensor) and not isinstance(obj, ops._EagerTensorBase): # pylint: disable=protected-access
# Tensor implements __array__ only so it can inform the user that it is not
# a valid array.
return False
# TODO(slebedev): an object could also implement C-level array interface.
if (callable(getattr(obj, "__array__", None)) or
isinstance(getattr(obj, "__array_interface__", None), dict)):
return True
try:
memoryview(obj)
except TypeError:
return False
else:
return not isinstance(obj, bytes)
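# Illustrative check (not part of the original file): numpy arrays and
# buffer-protocol objects count as array-like, while plain Python lists do not
# and are handled by the generic np.array() conversion path below instead.
def _ExampleIsArrayLike():
  return (_is_array_like(np.zeros(3)),       # True: implements __array__
          _is_array_like(bytearray(b'ab')),  # True: supports memoryview()
          _is_array_like([1, 2, 3]))         # False: falls back to np.array()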
# pylint: disable=invalid-name
@tf_export("make_tensor_proto")
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False,
allow_broadcast=False):
"""Create a TensorProto.
In TensorFlow 2.0, representing tensors as protos should no longer be a
common workflow. That said, this utility function is still useful for
generating TF Serving request protos:
```python
request = tensorflow_serving.apis.predict_pb2.PredictRequest()
request.model_spec.name = "my_model"
request.model_spec.signature_name = "serving_default"
request.inputs["images"].CopyFrom(tf.make_tensor_proto(X_new))
```
`make_tensor_proto` accepts "values" of a python scalar, a python list, a
numpy ndarray, or a numpy scalar.
If "values" is a python scalar or a python list, make_tensor_proto
first convert it to numpy ndarray. If dtype is None, the
conversion tries its best to infer the right numpy data
type. Otherwise, the resulting numpy array has a compatible data
type with the given dtype.
In either case above, the numpy ndarray (either the caller provided
or the auto-converted) must have the compatible type with dtype.
`make_tensor_proto` then converts the numpy array to a tensor proto.
If "shape" is None, the resulting tensor proto represents the numpy
array precisely.
Otherwise, "shape" specifies the tensor's shape and the numpy array
can not have more elements than what "shape" specifies.
Args:
values: Values to put in the TensorProto.
dtype: Optional tensor_pb2 DataType value.
shape: List of integers representing the dimensions of tensor.
verify_shape: Boolean that enables verification of a shape of values.
allow_broadcast: Boolean that enables allowing scalars and 1 length vector
broadcasting. Cannot be true when verify_shape is true.
Returns:
A `TensorProto`. Depending on the type, it may contain data in the
"tensor_content" attribute, which is not directly useful to Python programs.
To access the values you should convert the proto back to a numpy ndarray
with `tf.make_ndarray(proto)`.
If `values` is a `TensorProto`, it is immediately returned; `dtype` and
`shape` are ignored.
Raises:
TypeError: if unsupported types are provided.
    ValueError: if arguments have inappropriate values, or if verify_shape is
      True and the shape of values does not match the given shape.
"""
if allow_broadcast and verify_shape:
raise ValueError("allow_broadcast and verify_shape are not both allowed.")
if isinstance(values, tensor_pb2.TensorProto):
return values
if dtype:
dtype = dtypes.as_dtype(dtype)
is_quantized = (
dtype in [
dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,
dtypes.qint32
])
if _is_array_like(values):
values = np.asarray(values)
# We first convert value to a numpy array or scalar.
if isinstance(values, (np.ndarray, np.generic)):
if dtype and dtype.is_numpy_compatible:
nparray = values.astype(dtype.as_numpy_dtype)
else:
nparray = values
else:
if values is None:
raise ValueError("None values not supported.")
# if dtype is provided, forces numpy array to be the type
# provided if possible.
if dtype and dtype.is_numpy_compatible:
np_dt = dtype.as_numpy_dtype
else:
np_dt = None
# If shape is None, numpy.prod returns None when dtype is not set, but
# raises exception when dtype is set to np.int64
if shape is not None and np.prod(shape, dtype=np.int64) == 0:
nparray = np.empty(shape, dtype=np_dt)
else:
_AssertCompatible(values, dtype)
nparray = np.array(values, dtype=np_dt)
      # We need to pass in quantized values as tuples, so don't apply the shape
      # check to them.
if (list(nparray.shape) != _GetDenseDimensions(values) and
not is_quantized):
raise ValueError("""Argument must be a dense tensor: %s"""
""" - got shape %s, but wanted %s.""" %
(values, list(nparray.shape),
_GetDenseDimensions(values)))
# python/numpy default float type is float64. We prefer float32 instead.
if (nparray.dtype == np.float64) and dtype is None:
nparray = nparray.astype(np.float32)
# python/numpy default int type is int64. We prefer int32 instead.
elif (nparray.dtype == np.int64) and dtype is None:
downcasted_array = nparray.astype(np.int32)
# Do not down cast if it leads to precision loss.
if np.array_equal(downcasted_array, nparray):
nparray = downcasted_array
# if dtype is provided, it must be compatible with what numpy
# conversion says.
numpy_dtype = dtypes.as_dtype(nparray.dtype)
if numpy_dtype is None:
raise TypeError("Unrecognized data type: %s" % nparray.dtype)
# If dtype was specified and is a quantized type, we convert
# numpy_dtype back into the quantized version.
if is_quantized:
numpy_dtype = dtype
if dtype is not None and (not hasattr(dtype, "base_dtype") or
dtype.base_dtype != numpy_dtype.base_dtype):
raise TypeError("Incompatible types: %s vs. %s. Value is %s" %
(dtype, nparray.dtype, values))
# If shape is not given, get the shape from the numpy array.
if shape is None:
shape = nparray.shape
is_same_size = True
shape_size = nparray.size
else:
shape = [int(dim) for dim in shape]
shape_size = np.prod(shape, dtype=np.int64)
is_same_size = shape_size == nparray.size
if allow_broadcast:
if nparray.shape == (1,) or nparray.shape == tuple():
pass
elif nparray.size != shape_size:
raise TypeError("Expected Tensor's shape: %s, got %s." %
(tuple(shape), nparray.shape))
else:
if verify_shape and nparray.shape != tuple(shape):
raise TypeError("Expected Tensor's shape: %s, got %s." %
(tuple(shape), nparray.shape))
if nparray.size > shape_size:
raise ValueError(
"Too many elements provided. Needed at most %d, but received %d" %
(shape_size, nparray.size))
tensor_proto = tensor_pb2.TensorProto(
dtype=numpy_dtype.as_datatype_enum,
tensor_shape=tensor_shape.as_shape(shape).as_proto())
if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
if nparray.size * nparray.itemsize >= (1 << 31):
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
tensor_proto.tensor_content = nparray.tobytes()
return tensor_proto
# If we were not given values as a numpy array, compute the proto_values
# from the given values directly, to avoid numpy trimming nulls from the
# strings. Since values could be a list of strings, or a multi-dimensional
# list of lists that might or might not correspond to the given shape,
# we flatten it conservatively.
if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
proto_values = _FlattenToStrings(values)
# At this point, values may be a list of objects that we could not
# identify a common type for (hence it was inferred as
# np.object/dtypes.string). If we are unable to convert it to a
# string, we raise a more helpful error message.
#
# Ideally, we'd be able to convert the elements of the list to a
# common type, but this type inference requires some thinking and
# so we defer it for now.
try:
str_values = [compat.as_bytes(x) for x in proto_values]
except TypeError:
raise TypeError("Failed to convert object of type %s to Tensor. "
"Contents: %s. Consider casting elements to a "
"supported type." % (type(values), values))
tensor_proto.string_val.extend(str_values)
return tensor_proto
# TensorFlow expects C order (a.k.a., eigen row major).
proto_values = nparray.ravel()
append_fn = GetNumpyAppendFn(proto_values.dtype)
if append_fn is None:
raise TypeError(
"Element type not supported in TensorProto: %s" % numpy_dtype.name)
append_fn(tensor_proto, proto_values)
return tensor_proto
# pylint: enable=invalid-name
@tf_export("make_ndarray")
def MakeNdarray(tensor):
"""Create a numpy ndarray from a tensor.
Create a numpy ndarray with the same shape and data as the tensor.
For example:
```python
# Tensor a has shape (2,3)
a = tf.constant([[1,2,3],[4,5,6]])
proto_tensor = tf.make_tensor_proto(a) # convert `tensor a` to a proto tensor
tf.make_ndarray(proto_tensor) # output: array([[1, 2, 3],
# [4, 5, 6]], dtype=int32)
# output has shape (2,3)
```
Args:
tensor: A TensorProto.
Returns:
A numpy array with the tensor contents.
Raises:
TypeError: if tensor has unsupported type.
"""
shape = [d.size for d in tensor.tensor_shape.dim]
num_elements = np.prod(shape, dtype=np.int64)
tensor_dtype = dtypes.as_dtype(tensor.dtype)
dtype = tensor_dtype.as_numpy_dtype
if tensor.tensor_content:
return (np.frombuffer(tensor.tensor_content,
dtype=dtype).copy().reshape(shape))
if tensor_dtype == dtypes.string:
# np.pad throws on these arrays of type np.object.
values = list(tensor.string_val)
padding = num_elements - len(values)
if padding > 0:
last = values[-1] if values else ""
values.extend([last] * padding)
return np.array(values, dtype=dtype).reshape(shape)
if tensor_dtype == dtypes.float16 or tensor_dtype == dtypes.bfloat16:
# the half_val field of the TensorProto stores the binary representation
# of the fp16: we need to reinterpret this as a proper float16
values = np.fromiter(tensor.half_val, dtype=np.uint16)
values.dtype = tensor_dtype.as_numpy_dtype
elif tensor_dtype == dtypes.float32:
values = np.fromiter(tensor.float_val, dtype=dtype)
elif tensor_dtype == dtypes.float64:
values = np.fromiter(tensor.double_val, dtype=dtype)
elif tensor_dtype in [
dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16, dtypes.int8,
dtypes.qint32, dtypes.quint8, dtypes.qint8, dtypes.qint16, dtypes.quint16
]:
values = np.fromiter(tensor.int_val, dtype=dtype)
elif tensor_dtype == dtypes.int64:
values = np.fromiter(tensor.int64_val, dtype=dtype)
elif tensor_dtype == dtypes.uint32:
values = np.fromiter(tensor.uint32_val, dtype=dtype)
elif tensor_dtype == dtypes.uint64:
values = np.fromiter(tensor.uint64_val, dtype=dtype)
elif tensor_dtype == dtypes.complex64:
it = iter(tensor.scomplex_val)
values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)
elif tensor_dtype == dtypes.complex128:
it = iter(tensor.dcomplex_val)
values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)
elif tensor_dtype == dtypes.bool:
values = np.fromiter(tensor.bool_val, dtype=dtype)
else:
raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
if values.size == 0:
return np.zeros(shape, dtype)
if values.size != num_elements:
values = np.pad(values, (0, num_elements - values.size), "edge")
return values.reshape(shape)
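# Hedged round-trip sketch (not part of the original file): make_tensor_proto
# serializes a numpy array into a TensorProto (here via tensor_content, since
# int32 is in _TENSOR_CONTENT_TYPES), and MakeNdarray recovers the same data.
def _ExampleTensorProtoRoundTrip():
  original = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
  proto = make_tensor_proto(original)
  recovered = MakeNdarray(proto)
  return np.array_equal(original, recovered)  # True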
def ShapeEquals(tensor_proto, shape):
"""Returns True if "tensor_proto" has the given "shape".
Args:
tensor_proto: A TensorProto.
shape: A tensor shape, expressed as a TensorShape, list, or tuple.
Returns:
True if "tensor_proto" has the given "shape", otherwise False.
Raises:
TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
TensorShape, list, or tuple.
"""
if not isinstance(tensor_proto, tensor_pb2.TensorProto):
raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
shape = [d.size for d in shape.dim]
elif not isinstance(shape, (list, tuple)):
raise TypeError("shape is not a list or tuple")
tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
return all(x == y for x, y in zip(tensor_shape_list, shape))
def _ConstantValue(tensor, partial):
# TODO(touts): Support Variables?
if not isinstance(tensor, ops.Tensor):
raise TypeError("%r is not a Tensor, has type %s" % (tensor, type(tensor)))
if tensor.op.type == "Const":
return MakeNdarray(tensor.op.get_attr("value"))
elif tensor.op.type == "Shape":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.array(
[dim.value for dim in input_shape.dims],
dtype=tensor.dtype.as_numpy_dtype)
else:
return None
elif tensor.op.type == "Size":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
else:
return None
elif tensor.op.type == "Rank":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.ndims is not None:
return np.ndarray(
shape=(),
buffer=np.array([input_shape.ndims], dtype=np.int32),
dtype=np.int32)
else:
return None
elif tensor.op.type == "Range":
start = constant_value(tensor.op.inputs[0])
if start is None:
return None
limit = constant_value(tensor.op.inputs[1])
if limit is None:
return None
delta = constant_value(tensor.op.inputs[2])
if delta is None:
return None
return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
elif tensor.op.type == "Cast":
pre_cast = constant_value(tensor.op.inputs[0])
if pre_cast is None:
return None
cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
return pre_cast.astype(cast_dtype.as_numpy_dtype)
elif tensor.op.type == "Concat":
dim = constant_value(tensor.op.inputs[0])
if dim is None:
return None
values = []
for x in tensor.op.inputs[1:]:
value = constant_value(x)
if value is None:
return None
values.append(value)
return np.concatenate(values, axis=dim)
elif tensor.op.type == "ConcatV2":
dim = constant_value(tensor.op.inputs[-1])
if dim is None:
return None
values = []
for x in tensor.op.inputs[:-1]:
value = constant_value(x)
if value is None:
return None
values.append(value)
return np.concatenate(values, axis=dim)
elif tensor.op.type == "Pack":
values = []
# Some imported GraphDefs have Pack ops with zero inputs. Those are invalid
# and shouldn't be produced, but to deal sensibly with them here we check
# and return None.
if not tensor.op.inputs:
return None
# We can't handle axis != 0 Packs at the moment.
if tensor.op.get_attr("axis") != 0:
return None
for x in tensor.op.inputs:
value = constant_value(x, partial)
if value is None and not partial:
return None
values.append(value)
return np.array(values)
elif tensor.op.type == "Unpack":
# We can't handle axis != 0 Unpacks at the moment.
if tensor.op.get_attr("axis") != 0:
return None
value = constant_value(tensor.op.inputs[0], partial)
if value is None:
return None
return value[tensor.value_index]
elif tensor.op.type == "Split":
dim = constant_value(tensor.op.inputs[0])
value = constant_value(tensor.op.inputs[1], partial)
if value is None or dim is None:
return None
split = np.split(value, tensor.op.get_attr("num_split"), dim)
return split[tensor.value_index]
elif tensor.op.type == "Fill":
fill_shape = tensor.shape
fill_value = constant_value(tensor.op.inputs[1])
if fill_shape.is_fully_defined() and fill_value is not None:
return np.full(fill_shape.as_list(), fill_value, dtype=fill_value.dtype)
else:
return None
elif tensor.op.type == "Equal":
value1 = constant_value(tensor.op.inputs[0])
if value1 is None:
return None
value2 = constant_value(tensor.op.inputs[1])
if value2 is None:
return None
return np.equal(value1, value2)
elif tensor.op.type == "NotEqual":
value1 = constant_value(tensor.op.inputs[0])
if value1 is None:
return None
value2 = constant_value(tensor.op.inputs[1])
if value2 is None:
return None
return np.not_equal(value1, value2)
elif tensor.op.type == "StopGradient":
return constant_value(tensor.op.inputs[0], partial)
elif tensor.op.type in ("CheckNumericsV2", "DebugIdentityV2", "Identity"):
return constant_value(tensor.op.inputs[0], partial)
else:
return None
@tf_export("get_static_value")
def constant_value(tensor, partial=False): # pylint: disable=invalid-name
"""Returns the constant value of the given tensor, if efficiently calculable.
This function attempts to partially evaluate the given tensor, and
returns its value as a numpy ndarray if this succeeds.
Example usage:
>>> a = tf.constant(10)
>>> tf.get_static_value(a)
10
>>> b = tf.constant(20)
>>> tf.get_static_value(tf.add(a, b))
30
>>> # `tf.Variable` is not supported.
>>> c = tf.Variable(30)
>>> print(tf.get_static_value(c))
None
Using `partial` option is most relevant when calling `get_static_value` inside
a `tf.function`. Setting it to `True` will return the results but for the
values that cannot be evaluated will be `None`. For example:
```python
class Foo(object):
def __init__(self):
self.a = tf.Variable(1)
self.b = tf.constant(2)
@tf.function
def bar(self, partial):
packed = tf.raw_ops.Pack(values=[self.a, self.b])
static_val = tf.get_static_value(packed, partial=partial)
tf.print(static_val)
f = Foo()
f.bar(partial=True) # `array([None, array(2, dtype=int32)], dtype=object)`
f.bar(partial=False) # `None`
```
Compatibility(V1): If `constant_value(tensor)` returns a non-`None` result, it
will no longer be possible to feed a different value for `tensor`. This allows
the result of this function to influence the graph that is constructed, and
permits static shape optimizations.
Args:
tensor: The Tensor to be evaluated.
partial: If True, the returned numpy array is allowed to have partially
evaluated values. Values that can't be evaluated will be None.
Returns:
A numpy ndarray containing the constant value of the given `tensor`,
or None if it cannot be calculated.
Raises:
TypeError: if tensor is not an ops.Tensor.
"""
if isinstance(tensor, ops.EagerTensor):
try:
return tensor.numpy()
except errors_impl.UnimplementedError:
# Some EagerTensors may not implement .numpy/resolve, e.g. parallel
# tensors with multiple components on different devices.
return None
if not is_tensor(tensor):
return tensor
if not isinstance(tensor, ops.Tensor):
return None
ret = _ConstantValue(tensor, partial)
if ret is not None:
# The caller may now depend on the constant value of `tensor`, so we
# conservatively prevent it from being fed.
tensor.graph.prevent_feeding(tensor)
return ret
def constant_value_as_shape(tensor): # pylint: disable=invalid-name
"""A version of `constant_value()` that returns a `TensorShape`.
This version should be used when a constant tensor value is
interpreted as a (possibly partial) shape, e.g. in the shape
function for `tf.reshape()`. By explicitly requesting a
`TensorShape` as the return value, it is possible to represent
unknown dimensions; by contrast, `constant_value()` is
all-or-nothing.
Args:
tensor: The rank-0 or rank-1 Tensor to be evaluated.
Returns:
A `TensorShape` based on the constant value of the given `tensor`.
Raises:
ValueError: If the shape is rank-0 and is not statically known to be -1.
"""
if isinstance(tensor, ops.EagerTensor):
return tensor_shape.TensorShape(
[dim if dim != -1 else None for dim in tensor.numpy()])
if tensor.get_shape().ndims == 0:
value = constant_value(tensor)
if value is None:
raise ValueError(
"Received a scalar with unknown value as shape; require a statically "
"known scalar with value '-1' to describe an unknown shape.")
if value != -1:
raise ValueError(
"Received a scalar value '%s' as shape; require a statically known "
"scalar with value '-1' to describe an unknown shape." % value)
return tensor_shape.unknown_shape()
shape = tensor.get_shape().with_rank(1)
if shape == [0]:
return tensor_shape.TensorShape([])
elif tensor.op.type == "Cast":
pre_cast = constant_value_as_shape(tensor.op.inputs[0])
if pre_cast.dims is None:
# the input to cast has a totally undefined shape; just return that.
return pre_cast
cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
if cast_dtype not in (dtypes.int32, dtypes.int64):
return tensor_shape.unknown_shape(shape.dims[0].value)
dest_dtype_shape_array = np.array(
[x if x is not None else -1 for x in pre_cast.as_list()]).astype(
cast_dtype.as_numpy_dtype)
return tensor_shape.TensorShape([
x if x >= 0 else None
for x in dest_dtype_shape_array])
elif tensor.op.type == "Shape":
return tensor.op.inputs[0].get_shape()
elif tensor.op.type == "Pack":
ret = tensor_shape.TensorShape([]) # Empty list.
# Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
# would not be rank 1.
assert tensor.op.get_attr("axis") == 0
for pack_input in tensor.op.inputs:
# `pack_input` must be a scalar. Attempt to evaluate it, and append it
# to `ret`.
pack_input_val = constant_value(pack_input)
if pack_input_val is None or pack_input_val < 0:
new_dim = tensor_shape.Dimension(None)
else:
new_dim = tensor_shape.Dimension(pack_input_val)
ret = ret.concatenate([new_dim])
return ret
elif tensor.op.type == "Concat":
# We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
# the only legal value when concatenating vectors, and it will
# have been checked by a previous shape function.
ret = tensor_shape.TensorShape([]) # Empty list.
for concat_input in tensor.op.inputs[1:]:
# `concat_input` must be a vector. Attempt to evaluate it as a shape,
# and concatenate it with `ret`.
ret = ret.concatenate(constant_value_as_shape(concat_input))
return ret
elif tensor.op.type == "ConcatV2":
# We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
# the only legal value when concatenating vectors, and it will
# have been checked by a previous shape function.
ret = tensor_shape.TensorShape([]) # Empty list.
for concat_input in tensor.op.inputs[:-1]:
# `concat_input` must be a vector. Attempt to evaluate it as a shape,
# and concatenate it with `ret`.
ret = ret.concatenate(constant_value_as_shape(concat_input))
return ret
elif tensor.op.type == "StridedSlice":
try:
begin = constant_value(tensor.op.inputs[1])
end = constant_value(tensor.op.inputs[2])
strides = constant_value(tensor.op.inputs[3])
if begin is not None and end is not None and strides is not None:
begin = begin[0]
end = end[0]
strides = strides[0]
begin_mask = tensor.op.get_attr("begin_mask")
if begin_mask == 1:
begin = None
end_mask = tensor.op.get_attr("end_mask")
if end_mask == 1:
end = None
ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
new_axis_mask = tensor.op.get_attr("new_axis_mask")
shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
valid_attributes = (not ellipsis_mask and not new_axis_mask and
not shrink_axis_mask and (not begin_mask or
(begin_mask == 1)) and
(not end_mask or (end_mask == 1)))
if valid_attributes: # additional inputs not supported
prev = constant_value_as_shape(tensor.op.inputs[0])
prev = prev[begin:end:strides]
ret = tensor_shape.TensorShape(prev)
return ret
except ValueError: # Could come from get_attr or slicing prev.
pass
except TypeError: # Could come from slicing prev.
pass
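  # Illustrative: for `x` with static shape [None, 128], `tf.shape(x)[1:]` is a
  # StridedSlice over a Shape op (begin=1, end_mask=1 so end=None, strides=1);
  # the branch above slices the statically known shape of `x` and returns
  # TensorShape([128]).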
elif (tensor.op.type == "Placeholder" and
tensor.op.graph.building_function and
hasattr(tensor.op.graph, "internal_captures")):
# If we are inside a FuncGraph try to lookup the constant value of the
# corresponding external capture. Note that we only look at captures and
# not the fed inputs because those can be fed different values in different
# instantiations of the function call or different iterations of a
# tf.while_loop.
for i, capture in enumerate(tensor.op.graph.internal_captures):
if capture is tensor:
external_capture = tensor.op.graph.external_captures[i]
return constant_value_as_shape(external_capture)
ret = tensor_shape.unknown_shape(shape.dims[0].value)
value = constant_value(tensor)
if value is not None:
ret = ret.merge_with(
tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
return ret
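# Illustrative results: a `tf.stack` (Pack op) of [2, <unknown scalar>, 5]
# yields TensorShape([2, None, 5]); a plain rank-1 constant [16, -1, 3] reaches
# the final fallback, where -1 is mapped to an unknown dimension, giving
# TensorShape([16, None, 3]).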
# TODO(mdan): Deprecate in favor of more static-friendly types.
@tf_export("is_tensor")
def is_tf_type(x): # pylint: disable=invalid-name
"""Checks whether `x` is a TF-native type that can be passed to many TF ops.
  Use `is_tensor` to differentiate types that can be ingested by TensorFlow ops
without any conversion (e.g., `tf.Tensor`, `tf.SparseTensor`, and
`tf.RaggedTensor`) from types that need to be converted into tensors before
they are ingested (e.g., numpy `ndarray` and Python scalars).
For example, in the following code block:
```python
if not tf.is_tensor(t):
t = tf.convert_to_tensor(t)
return t.shape, t.dtype
```
we check to make sure that `t` is a tensor (and convert it if not) before
accessing its `shape` and `dtype`. (But note that not all TensorFlow native
types have shapes or dtypes; `tf.data.Dataset` is an example of a TensorFlow
native type that has neither shape nor dtype.)
Args:
x: A python object to check.
Returns:
`True` if `x` is a TensorFlow-native type.
"""
return (isinstance(x, internal.NativeObject) or
isinstance(x, core.Tensor) or
getattr(x, "is_tensor_like", False))
# Deprecated alias for tensor_util.is_tf_type.
is_tensor = is_tf_type
def shape_tensor(shape): # pylint: disable=invalid-name
"""Convert to an int32 or int64 tensor, defaulting to int32 if empty."""
dtype = None
if isinstance(shape, (tuple, list)):
if not shape:
dtype = dtypes.int32
else:
# If there are Dimension objects in the shape, unwrap them. This can be a
# problem if v1 and v2 TensorShape objects get mixed up in partial
# conversions, leading to shapes such as (1, 2, Dimension(5)), which are
# not convertible to Tensors because of mixed content.
shape = tuple(map(tensor_shape.dimension_value, shape))
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
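# Illustrative: shape_tensor([]) produces an empty int32 tensor, while
# shape_tensor((1, 2, tensor_shape.Dimension(5))) unwraps the Dimension to the
# plain int 5 before conversion, so mixed v1/v2 shape contents still convert.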
# DO NOT USE: For testing only.
_ENABLE_MAYBE_SET_STATIC_SHAPE = True
def maybe_set_static_shape(tensor, shape): # pylint: disable=invalid-name
"""Sets the shape of `tensor` to the `shape`'s constant value, if inferrable.
This is a temporary workaround to fix shape inference across functional op
boundaries. E.g.
```python
shape = tf.constant([3])
@tf.function
def f():
u = tf.random_uniform(shape)
return u
```
If we were to rely solely on C++ shape inference, the shape of `u` inside
`f` would be unknown because C++ shape inference is not aware of the outer
graph and all it sees is a Placeholder node when backtracing the captured
tensor for `shape`. `maybe_set_static_shape` computes the static shape value
of `shape` by traversing the `FuncGraph` boundaries and sets the correct
shape.
A longer term solution would be to fix C++ shape inference.
Args:
tensor: A tensor.
shape: A shape tensor.
"""
if (_ENABLE_MAYBE_SET_STATIC_SHAPE and not context.executing_eagerly() and
ops.get_default_graph().building_function and
not tensor.shape.is_fully_defined() and is_tensor(shape)):
shape = shape_tensor(shape)
const_shape = constant_value_as_shape(shape)
tensor.set_shape(const_shape)
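# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged example of how the helpers above compose: it converts a
# Python shape to a shape tensor with `shape_tensor()` and then recovers the
# partial static shape with `constant_value_as_shape()`. The helper name is
# hypothetical; it is defined here purely for illustration and never called.
def _example_partial_shape_roundtrip(shape):  # hypothetical helper
  """Return the TensorShape statically recoverable from `shape`."""
  shape_t = shape_tensor(shape)             # e.g. [16, -1, 3] -> int32 tensor
  return constant_value_as_shape(shape_t)   # -> TensorShape([16, None, 3])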
|
|
"""Support for Xiaomi Philips Lights."""
import asyncio
import datetime
from datetime import timedelta
from functools import partial
import logging
from math import ceil
from miio import Ceil, DeviceException, PhilipsBulb, PhilipsEyecare, PhilipsMoonlight
from miio.gateway.gateway import (
GATEWAY_MODEL_AC_V1,
GATEWAY_MODEL_AC_V2,
GATEWAY_MODEL_AC_V3,
GatewayException,
)
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_TOKEN
import homeassistant.helpers.config_validation as cv
from homeassistant.util import color, dt
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MODEL,
DOMAIN,
KEY_COORDINATOR,
MODELS_LIGHT_BULB,
MODELS_LIGHT_CEILING,
MODELS_LIGHT_EYECARE,
MODELS_LIGHT_MONO,
MODELS_LIGHT_MOON,
SERVICE_EYECARE_MODE_OFF,
SERVICE_EYECARE_MODE_ON,
SERVICE_NIGHT_LIGHT_MODE_OFF,
SERVICE_NIGHT_LIGHT_MODE_ON,
SERVICE_REMINDER_OFF,
SERVICE_REMINDER_ON,
SERVICE_SET_DELAYED_TURN_OFF,
SERVICE_SET_SCENE,
)
from .device import XiaomiMiioEntity
from .gateway import XiaomiGatewayDevice
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Philips Light"
DATA_KEY = "light.xiaomi_miio"
# The light does not accept cct values < 1
CCT_MIN = 1
CCT_MAX = 100
DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS = 4
DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES = 1
SUCCESS = ["ok"]
ATTR_SCENE = "scene"
ATTR_DELAYED_TURN_OFF = "delayed_turn_off"
ATTR_TIME_PERIOD = "time_period"
ATTR_NIGHT_LIGHT_MODE = "night_light_mode"
ATTR_AUTOMATIC_COLOR_TEMPERATURE = "automatic_color_temperature"
ATTR_REMINDER = "reminder"
ATTR_EYECARE_MODE = "eyecare_mode"
# Moonlight
ATTR_SLEEP_ASSISTANT = "sleep_assistant"
ATTR_SLEEP_OFF_TIME = "sleep_off_time"
ATTR_TOTAL_ASSISTANT_SLEEP_TIME = "total_assistant_sleep_time"
ATTR_BAND_SLEEP = "band_sleep"
ATTR_BAND = "band"
XIAOMI_MIIO_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
SERVICE_SCHEMA_SET_SCENE = XIAOMI_MIIO_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_SCENE): vol.All(vol.Coerce(int), vol.Clamp(min=1, max=6))}
)
SERVICE_SCHEMA_SET_DELAYED_TURN_OFF = XIAOMI_MIIO_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_TIME_PERIOD): cv.positive_time_period}
)
SERVICE_TO_METHOD = {
SERVICE_SET_DELAYED_TURN_OFF: {
"method": "async_set_delayed_turn_off",
"schema": SERVICE_SCHEMA_SET_DELAYED_TURN_OFF,
},
SERVICE_SET_SCENE: {
"method": "async_set_scene",
"schema": SERVICE_SCHEMA_SET_SCENE,
},
SERVICE_REMINDER_ON: {"method": "async_reminder_on"},
SERVICE_REMINDER_OFF: {"method": "async_reminder_off"},
SERVICE_NIGHT_LIGHT_MODE_ON: {"method": "async_night_light_mode_on"},
SERVICE_NIGHT_LIGHT_MODE_OFF: {"method": "async_night_light_mode_off"},
SERVICE_EYECARE_MODE_ON: {"method": "async_eyecare_mode_on"},
SERVICE_EYECARE_MODE_OFF: {"method": "async_eyecare_mode_off"},
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Xiaomi light from a config entry."""
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_GATEWAY:
gateway = hass.data[DOMAIN][config_entry.entry_id][CONF_GATEWAY]
# Gateway light
if gateway.model not in [
GATEWAY_MODEL_AC_V1,
GATEWAY_MODEL_AC_V2,
GATEWAY_MODEL_AC_V3,
]:
entities.append(
XiaomiGatewayLight(gateway, config_entry.title, config_entry.unique_id)
)
# Gateway sub devices
sub_devices = gateway.devices
coordinator = hass.data[DOMAIN][config_entry.entry_id][KEY_COORDINATOR]
for sub_device in sub_devices.values():
if sub_device.device_type == "LightBulb":
entities.append(
XiaomiGatewayBulb(coordinator, sub_device, config_entry)
)
if config_entry.data[CONF_FLOW_TYPE] == CONF_DEVICE:
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config_entry.data[CONF_HOST]
token = config_entry.data[CONF_TOKEN]
name = config_entry.title
model = config_entry.data[CONF_MODEL]
unique_id = config_entry.unique_id
_LOGGER.debug("Initializing with host %s (token %s...)", host, token[:5])
if model in MODELS_LIGHT_EYECARE:
light = PhilipsEyecare(host, token)
entity = XiaomiPhilipsEyecareLamp(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
entities.append(
XiaomiPhilipsEyecareLampAmbientLight(
name, light, config_entry, unique_id
)
)
# The ambient light doesn't expose additional services.
# A hass.data[DATA_KEY] entry isn't needed.
elif model in MODELS_LIGHT_CEILING:
light = Ceil(host, token)
entity = XiaomiPhilipsCeilingLamp(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
elif model in MODELS_LIGHT_MOON:
light = PhilipsMoonlight(host, token)
entity = XiaomiPhilipsMoonlightLamp(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
elif model in MODELS_LIGHT_BULB:
light = PhilipsBulb(host, token)
entity = XiaomiPhilipsBulb(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
elif model in MODELS_LIGHT_MONO:
light = PhilipsBulb(host, token)
entity = XiaomiPhilipsGenericLight(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
else:
_LOGGER.error(
"Unsupported device found! Please create an issue at "
"https://github.com/syssi/philipslight/issues "
"and provide the following data: %s",
model,
)
return
async def async_service_handler(service):
"""Map services to methods on Xiaomi Philips Lights."""
method = SERVICE_TO_METHOD.get(service.service)
params = {
key: value
for key, value in service.data.items()
if key != ATTR_ENTITY_ID
}
if entity_ids := service.data.get(ATTR_ENTITY_ID):
target_devices = [
dev
for dev in hass.data[DATA_KEY].values()
if dev.entity_id in entity_ids
]
else:
target_devices = hass.data[DATA_KEY].values()
update_tasks = []
for target_device in target_devices:
if not hasattr(target_device, method["method"]):
continue
await getattr(target_device, method["method"])(**params)
update_tasks.append(target_device.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
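    # Illustrative (entity id and service data are hypothetical): a call to the
    # service registered for SERVICE_SET_SCENE with
    # {"entity_id": "light.philips_bulb", "scene": 2} is routed through
    # async_service_handler above, matched against hass.data[DATA_KEY] and
    # dispatched as async_set_scene(scene=2) on the matching entity.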
for xiaomi_miio_service, method in SERVICE_TO_METHOD.items():
schema = method.get("schema", XIAOMI_MIIO_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, xiaomi_miio_service, async_service_handler, schema=schema
)
async_add_entities(entities, update_before_add=True)
class XiaomiPhilipsAbstractLight(XiaomiMiioEntity, LightEntity):
"""Representation of a Abstract Xiaomi Philips Light."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._brightness = None
self._available = False
self._state = None
self._state_attrs = {}
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
return self._state_attrs
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a light command handling error messages."""
try:
result = await self.hass.async_add_executor_job(
partial(func, *args, **kwargs)
)
_LOGGER.debug("Response received from light: %s", result)
return result == SUCCESS
except DeviceException as exc:
if self._available:
_LOGGER.error(mask_error, exc)
self._available = False
return False
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._device.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._device.on)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._try_command("Turning the light off failed.", self._device.off)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
class XiaomiPhilipsGenericLight(XiaomiPhilipsAbstractLight):
"""Representation of a Generic Xiaomi Philips Light."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._state_attrs.update({ATTR_SCENE: None, ATTR_DELAYED_TURN_OFF: None})
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off}
)
async def async_set_scene(self, scene: int = 1):
"""Set the fixed scene."""
await self._try_command(
"Setting a fixed scene failed.", self._device.set_scene, scene
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off."""
await self._try_command(
"Setting the turn off delay failed.",
self._device.delay_off,
time_period.total_seconds(),
)
@staticmethod
def delayed_turn_off_timestamp(
countdown: int, current: datetime, previous: datetime
):
"""Update the turn off timestamp only if necessary."""
if countdown is not None and countdown > 0:
new = current.replace(microsecond=0) + timedelta(seconds=countdown)
if previous is None:
return new
lower = timedelta(seconds=-DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
upper = timedelta(seconds=DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
diff = previous - new
if lower < diff < upper:
return previous
return new
return None
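    # Illustrative: a 60 s countdown polled at 12:00:00 yields 12:01:00; if a
    # later poll computes 12:00:58, the difference lies within the 4 s
    # deviation window, so the previously stored timestamp is kept and the
    # attribute does not jitter on every update.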
class XiaomiPhilipsBulb(XiaomiPhilipsGenericLight):
"""Representation of a Xiaomi Philips Bulb."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._color_temp = None
@property
def color_temp(self):
"""Return the color temperature."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 175
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 333
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
percent_color_temp = self.translate(
color_temp, self.max_mireds, self.min_mireds, CCT_MIN, CCT_MAX
)
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
if ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting brightness and color temperature: "
"%s %s%%, %s mireds, %s%% cct",
brightness,
percent_brightness,
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting brightness and color temperature failed: %s bri, %s cct",
self._device.set_brightness_and_color_temperature,
percent_brightness,
percent_color_temp,
)
if result:
self._color_temp = color_temp
self._brightness = brightness
elif ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting color temperature: %s mireds, %s%% cct",
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting color temperature failed: %s cct",
self._device.set_color_temperature,
percent_color_temp,
)
if result:
self._color_temp = color_temp
elif ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._device.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._device.on)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off}
)
@staticmethod
def translate(value, left_min, left_max, right_min, right_max):
"""Map a value from left span to right span."""
left_span = left_max - left_min
right_span = right_max - right_min
value_scaled = float(value - left_min) / float(left_span)
return int(right_min + (value_scaled * right_span))
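    # Illustrative: with min_mireds=175 and max_mireds=333,
    # translate(333, 333, 175, CCT_MIN, CCT_MAX) == 1 and
    # translate(175, 333, 175, CCT_MIN, CCT_MAX) == 100, i.e. the warmest mired
    # value maps to the device's lowest cct step; async_update() applies the
    # inverse mapping when reading the state back.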
class XiaomiPhilipsCeilingLamp(XiaomiPhilipsBulb):
"""Representation of a Xiaomi Philips Ceiling Lamp."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._state_attrs.update(
{ATTR_NIGHT_LIGHT_MODE: None, ATTR_AUTOMATIC_COLOR_TEMPERATURE: None}
)
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 175
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 370
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_DELAYED_TURN_OFF: delayed_turn_off,
ATTR_NIGHT_LIGHT_MODE: state.smart_night_light,
ATTR_AUTOMATIC_COLOR_TEMPERATURE: state.automatic_color_temperature,
}
)
class XiaomiPhilipsEyecareLamp(XiaomiPhilipsGenericLight):
"""Representation of a Xiaomi Philips Eyecare Lamp 2."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._state_attrs.update(
{ATTR_REMINDER: None, ATTR_NIGHT_LIGHT_MODE: None, ATTR_EYECARE_MODE: None}
)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_DELAYED_TURN_OFF: delayed_turn_off,
ATTR_REMINDER: state.reminder,
ATTR_NIGHT_LIGHT_MODE: state.smart_night_light,
ATTR_EYECARE_MODE: state.eyecare,
}
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off."""
await self._try_command(
"Setting the turn off delay failed.",
self._device.delay_off,
round(time_period.total_seconds() / 60),
)
async def async_reminder_on(self):
"""Enable the eye fatigue notification."""
await self._try_command(
"Turning on the reminder failed.", self._device.reminder_on
)
async def async_reminder_off(self):
"""Disable the eye fatigue notification."""
await self._try_command(
"Turning off the reminder failed.", self._device.reminder_off
)
async def async_night_light_mode_on(self):
"""Turn the smart night light mode on."""
await self._try_command(
"Turning on the smart night light mode failed.",
self._device.smart_night_light_on,
)
async def async_night_light_mode_off(self):
"""Turn the smart night light mode off."""
await self._try_command(
"Turning off the smart night light mode failed.",
self._device.smart_night_light_off,
)
async def async_eyecare_mode_on(self):
"""Turn the eyecare mode on."""
await self._try_command(
"Turning on the eyecare mode failed.", self._device.eyecare_on
)
async def async_eyecare_mode_off(self):
"""Turn the eyecare mode off."""
await self._try_command(
"Turning off the eyecare mode failed.", self._device.eyecare_off
)
@staticmethod
def delayed_turn_off_timestamp(
countdown: int, current: datetime, previous: datetime
):
"""Update the turn off timestamp only if necessary."""
if countdown is not None and countdown > 0:
new = current.replace(second=0, microsecond=0) + timedelta(
minutes=countdown
)
if previous is None:
return new
lower = timedelta(minutes=-DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
upper = timedelta(minutes=DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
diff = previous - new
if lower < diff < upper:
return previous
return new
return None
class XiaomiPhilipsEyecareLampAmbientLight(XiaomiPhilipsAbstractLight):
"""Representation of a Xiaomi Philips Eyecare Lamp Ambient Light."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
name = f"{name} Ambient Light"
if unique_id is not None:
unique_id = f"{unique_id}-ambient"
super().__init__(name, device, entry, unique_id)
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug(
"Setting brightness of the ambient light: %s %s%%",
brightness,
percent_brightness,
)
result = await self._try_command(
"Setting brightness of the ambient failed: %s",
self._device.set_ambient_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command(
"Turning the ambient light on failed.", self._device.ambient_on
)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._try_command(
"Turning the ambient light off failed.", self._device.ambient_off
)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.ambient
self._brightness = ceil((255 / 100.0) * state.ambient_brightness)
class XiaomiPhilipsMoonlightLamp(XiaomiPhilipsBulb):
"""Representation of a Xiaomi Philips Zhirui Bedside Lamp."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._hs_color = None
self._state_attrs.pop(ATTR_DELAYED_TURN_OFF)
self._state_attrs.update(
{
ATTR_SLEEP_ASSISTANT: None,
ATTR_SLEEP_OFF_TIME: None,
ATTR_TOTAL_ASSISTANT_SLEEP_TIME: None,
ATTR_BAND_SLEEP: None,
ATTR_BAND: None,
}
)
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 153
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 588
@property
def hs_color(self) -> tuple:
"""Return the hs color value."""
return self._hs_color
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
percent_color_temp = self.translate(
color_temp, self.max_mireds, self.min_mireds, CCT_MIN, CCT_MAX
)
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
if ATTR_HS_COLOR in kwargs:
hs_color = kwargs[ATTR_HS_COLOR]
rgb = color.color_hs_to_RGB(*hs_color)
if ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR in kwargs:
_LOGGER.debug(
"Setting brightness and color: %s %s%%, %s",
brightness,
percent_brightness,
rgb,
)
result = await self._try_command(
"Setting brightness and color failed: %s bri, %s color",
self._device.set_brightness_and_rgb,
percent_brightness,
rgb,
)
if result:
self._hs_color = hs_color
self._brightness = brightness
elif ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting brightness and color temperature: "
"%s %s%%, %s mireds, %s%% cct",
brightness,
percent_brightness,
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting brightness and color temperature failed: %s bri, %s cct",
self._device.set_brightness_and_color_temperature,
percent_brightness,
percent_color_temp,
)
if result:
self._color_temp = color_temp
self._brightness = brightness
elif ATTR_HS_COLOR in kwargs:
_LOGGER.debug("Setting color: %s", rgb)
result = await self._try_command(
"Setting color failed: %s", self._device.set_rgb, rgb
)
if result:
self._hs_color = hs_color
elif ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting color temperature: %s mireds, %s%% cct",
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting color temperature failed: %s cct",
self._device.set_color_temperature,
percent_color_temp,
)
if result:
self._color_temp = color_temp
elif ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._device.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._device.on)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
self._hs_color = color.color_RGB_to_hs(*state.rgb)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_SLEEP_ASSISTANT: state.sleep_assistant,
ATTR_SLEEP_OFF_TIME: state.sleep_off_time,
ATTR_TOTAL_ASSISTANT_SLEEP_TIME: state.total_assistant_sleep_time,
ATTR_BAND_SLEEP: state.brand_sleep,
ATTR_BAND: state.brand,
}
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off. Unsupported."""
return
class XiaomiGatewayLight(LightEntity):
"""Representation of a gateway device's light."""
def __init__(self, gateway_device, gateway_name, gateway_device_id):
"""Initialize the XiaomiGatewayLight."""
self._gateway = gateway_device
self._name = f"{gateway_name} Light"
self._gateway_device_id = gateway_device_id
self._unique_id = gateway_device_id
self._available = False
self._is_on = None
self._brightness_pct = 100
self._rgb = (255, 255, 255)
self._hs = (0, 0)
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def device_info(self):
"""Return the device info of the gateway."""
return {
"identifiers": {(DOMAIN, self._gateway_device_id)},
}
@property
def name(self):
"""Return the name of this entity, if any."""
return self._name
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def is_on(self):
"""Return true if it is on."""
return self._is_on
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int(255 * self._brightness_pct / 100)
@property
def hs_color(self):
"""Return the hs color value."""
return self._hs
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
def turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_HS_COLOR in kwargs:
rgb = color.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
else:
rgb = self._rgb
if ATTR_BRIGHTNESS in kwargs:
brightness_pct = int(100 * kwargs[ATTR_BRIGHTNESS] / 255)
else:
brightness_pct = self._brightness_pct
self._gateway.light.set_rgb(brightness_pct, rgb)
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the light off."""
self._gateway.light.set_rgb(0, self._rgb)
self.schedule_update_ha_state()
async def async_update(self):
"""Fetch state from the device."""
try:
state_dict = await self.hass.async_add_executor_job(
self._gateway.light.rgb_status
)
except GatewayException as ex:
if self._available:
self._available = False
_LOGGER.error(
"Got exception while fetching the gateway light state: %s", ex
)
return
self._available = True
self._is_on = state_dict["is_on"]
if self._is_on:
self._brightness_pct = state_dict["brightness"]
self._rgb = state_dict["rgb"]
self._hs = color.color_RGB_to_hs(*self._rgb)
class XiaomiGatewayBulb(XiaomiGatewayDevice, LightEntity):
"""Representation of Xiaomi Gateway Bulb."""
@property
def brightness(self):
"""Return the brightness of the light."""
return round((self._sub_device.status["brightness"] * 255) / 100)
@property
def color_temp(self):
"""Return current color temperature."""
return self._sub_device.status["color_temp"]
@property
def is_on(self):
"""Return true if light is on."""
return self._sub_device.status["status"] == "on"
@property
def min_mireds(self):
"""Return min cct."""
return self._sub_device.status["cct_min"]
@property
def max_mireds(self):
"""Return max cct."""
return self._sub_device.status["cct_max"]
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
await self.hass.async_add_executor_job(self._sub_device.on)
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
await self.hass.async_add_executor_job(
self._sub_device.set_color_temp, color_temp
)
if ATTR_BRIGHTNESS in kwargs:
brightness = round((kwargs[ATTR_BRIGHTNESS] * 100) / 255)
await self.hass.async_add_executor_job(
self._sub_device.set_brightness, brightness
)
    async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self.hass.async_add_executor_job(self._sub_device.off)
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from xmlrpc.client import ServerProxy
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QPushButton
from electrum import bitcoin, util, keystore, ecc
from electrum import transaction
from electrum.plugins import BasePlugin, hook
from electrum.i18n import _
from electrum.wallet import Multisig_Wallet
from electrum.util import bh2u, bfh
from electrum_gui.qt.transaction_dialog import show_transaction
import sys
import traceback
server = ServerProxy('https://cosigner.electrum.org/', allow_none=True)
class Listener(util.DaemonThread):
def __init__(self, parent):
util.DaemonThread.__init__(self)
self.daemon = True
self.parent = parent
self.received = set()
self.keyhashes = []
def set_keyhashes(self, keyhashes):
self.keyhashes = keyhashes
def clear(self, keyhash):
server.delete(keyhash)
self.received.remove(keyhash)
def run(self):
while self.running:
if not self.keyhashes:
time.sleep(2)
continue
for keyhash in self.keyhashes:
if keyhash in self.received:
continue
try:
message = server.get(keyhash)
except Exception as e:
self.print_error("cannot contact cosigner pool")
time.sleep(30)
continue
if message:
self.received.add(keyhash)
self.print_error("received message for", keyhash)
self.parent.obj.cosigner_receive_signal.emit(
keyhash, message)
# poll every 30 seconds
time.sleep(30)
class QReceiveSignalObject(QObject):
cosigner_receive_signal = pyqtSignal(object, object)
class Plugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.listener = None
self.obj = QReceiveSignalObject()
self.obj.cosigner_receive_signal.connect(self.on_receive)
self.keys = []
self.cosigner_list = []
@hook
def init_qt(self, gui):
for window in gui.windows:
self.on_new_window(window)
@hook
def on_new_window(self, window):
self.update(window)
@hook
def on_close_window(self, window):
self.update(window)
def is_available(self):
return True
def update(self, window):
wallet = window.wallet
if type(wallet) != Multisig_Wallet:
return
if self.listener is None:
self.print_error("starting listener")
self.listener = Listener(self)
self.listener.start()
elif self.listener:
self.print_error("shutting down listener")
self.listener.stop()
self.listener = None
self.keys = []
self.cosigner_list = []
for key, keystore in wallet.keystores.items():
xpub = keystore.get_master_public_key()
K = bitcoin.deserialize_xpub(xpub)[-1]
_hash = bh2u(bitcoin.Hash(K))
if not keystore.is_watching_only():
self.keys.append((key, _hash, window))
else:
self.cosigner_list.append((window, xpub, K, _hash))
if self.listener:
self.listener.set_keyhashes([t[1] for t in self.keys])
@hook
def transaction_dialog(self, d):
d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
b.clicked.connect(lambda: self.do_send(d.tx))
d.buttons.insert(0, b)
self.transaction_dialog_update(d)
@hook
def transaction_dialog_update(self, d):
if d.tx.is_complete() or d.wallet.can_sign(d.tx):
d.cosigner_send_button.hide()
return
for window, xpub, K, _hash in self.cosigner_list:
if window.wallet == d.wallet and self.cosigner_can_sign(d.tx, xpub):
d.cosigner_send_button.show()
break
else:
d.cosigner_send_button.hide()
def cosigner_can_sign(self, tx, cosigner_xpub):
from electrum.keystore import is_xpubkey, parse_xpubkey
xpub_set = set([])
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
xpub_set.add(xpub)
return cosigner_xpub in xpub_set
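        # Illustrative: the loop above collects every xpub encoded in the
        # inputs' x_pubkeys, so a cosigner is offered the transaction only if
        # its xpub actually participates in the multisig inputs being signed.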
def do_send(self, tx):
for window, xpub, K, _hash in self.cosigner_list:
if not self.cosigner_can_sign(tx, xpub):
continue
raw_tx_bytes = bfh(str(tx))
public_key = ecc.ECPubkey(K)
message = public_key.encrypt_message(raw_tx_bytes).decode('ascii')
try:
server.put(_hash, message)
except Exception as e:
traceback.print_exc(file=sys.stdout)
window.show_error(_("Failed to send transaction to cosigning pool") + ':\n' + str(e))
return
window.show_message(_("Your transaction was sent to the cosigning pool.") + '\n' +
_("Open your cosigner wallet to retrieve it."))
def on_receive(self, keyhash, message):
self.print_error("signal arrived for", keyhash)
for key, _hash, window in self.keys:
if _hash == keyhash:
break
else:
self.print_error("keyhash not found")
return
wallet = window.wallet
if isinstance(wallet.keystore, keystore.Hardware_KeyStore):
window.show_warning(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
_('However, hardware wallets do not support message decryption, '
'which makes them not compatible with the current design of cosigner pool.'))
return
elif wallet.has_keystore_encryption():
password = window.password_dialog(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
_('Please enter your password to decrypt it.'))
if not password:
return
else:
password = None
if not window.question(_("An encrypted transaction was retrieved from cosigning pool.") + '\n' +
_("Do you want to open it now?")):
return
xprv = wallet.keystore.get_master_private_key(password)
if not xprv:
return
try:
k = bitcoin.deserialize_xprv(xprv)[-1]
EC = ecc.ECPrivkey(k)
message = bh2u(EC.decrypt_message(message))
except Exception as e:
traceback.print_exc(file=sys.stdout)
window.show_error(_('Error decrypting message') + ':\n' + str(e))
return
self.listener.clear(keyhash)
tx = transaction.Transaction(message)
show_transaction(tx, window, prompt_if_unsaved=True)
|
|
import os
import logging
import struct
import claripy
from cle import MetaELF
from cle.backends.elf.symbol import ELFSymbol, ELFSymbolType
from cle.address_translator import AT
from archinfo import ArchX86, ArchAMD64, ArchARM, ArchAArch64, ArchMIPS32, ArchMIPS64, ArchPPC32, ArchPPC64
from ..tablespecs import StringTableSpec
from ..procedures import SIM_PROCEDURES as P, SIM_LIBRARIES as L
from ..state_plugins import SimFilesystem, SimHostFilesystem
from ..storage.file import SimFile, SimFileBase
from ..errors import AngrSyscallError
from .userland import SimUserland
_l = logging.getLogger(name=__name__)
class SimLinux(SimUserland):
"""
OS-specific configuration for \\*nix-y OSes.
"""
def __init__(self, project, **kwargs):
super(SimLinux, self).__init__(project,
syscall_library=L['linux'],
syscall_addr_alignment=project.arch.instruction_alignment,
name="Linux",
**kwargs)
self._loader_addr = None
self._loader_lock_addr = None
self._loader_unlock_addr = None
self._error_catch_tsd_addr = None
self.vsyscall_addr = None
def configure_project(self): # pylint: disable=arguments-differ
self._loader_addr = self.project.loader.extern_object.allocate()
self._loader_lock_addr = self.project.loader.extern_object.allocate()
self._loader_unlock_addr = self.project.loader.extern_object.allocate()
self._error_catch_tsd_addr = self.project.loader.extern_object.allocate()
self.vsyscall_addr = self.project.loader.extern_object.allocate()
self.project.hook(self._loader_addr, P['linux_loader']['LinuxLoader']())
self.project.hook(self._loader_lock_addr, P['linux_loader']['_dl_rtld_lock_recursive']())
self.project.hook(self._loader_unlock_addr, P['linux_loader']['_dl_rtld_unlock_recursive']())
self.project.hook(self._error_catch_tsd_addr,
P['linux_loader']['_dl_initial_error_catch_tsd'](
static_addr=self.project.loader.extern_object.allocate()
)
)
self.project.hook(self.vsyscall_addr, P['linux_kernel']['_vsyscall']())
ld_obj = self.project.loader.linux_loader_object
if ld_obj is not None:
# there are some functions we MUST use the simprocedures for, regardless of what the user wants
self._weak_hook_symbol('__tls_get_addr', L['ld.so'].get('__tls_get_addr', self.arch), ld_obj)
self._weak_hook_symbol('___tls_get_addr', L['ld.so'].get('___tls_get_addr', self.arch), ld_obj)
# set up some static data in the loader object...
# TODO it should be legal to get these from the externs now
_rtld_global = ld_obj.get_symbol('_rtld_global')
if _rtld_global is not None:
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF08, self._loader_lock_addr)
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF10, self._loader_unlock_addr)
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0x990, self._error_catch_tsd_addr)
# TODO: what the hell is this
_rtld_global_ro = ld_obj.get_symbol('_rtld_global_ro')
if _rtld_global_ro is not None:
pass
libc_obj = self.project.loader.find_object('libc.so.6')
if libc_obj:
self._weak_hook_symbol('_dl_vdso_vsym', L['libc.so.6'].get('_dl_vdso_vsym', self.arch), libc_obj)
tls_obj = self.project.loader.tls_object
if tls_obj is not None:
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x28, 0x5f43414e41525900) # _CANARY\x00
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x30, 0x5054524755415244)
elif isinstance(self.project.arch, ArchX86):
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x10, self.vsyscall_addr)
elif isinstance(self.project.arch, ArchARM):
self.project.hook(0xffff0fe0, P['linux_kernel']['_kernel_user_helper_get_tls']())
# Only set up ifunc resolution if we are using the ELF backend on AMD64
if isinstance(self.project.loader.main_object, MetaELF):
if isinstance(self.project.arch, (ArchAMD64, ArchX86)):
for binary in self.project.loader.all_objects:
if not isinstance(binary, MetaELF):
continue
for reloc in binary.relocs:
if reloc.symbol is None or reloc.resolvedby is None:
continue
try:
if reloc.resolvedby.subtype != ELFSymbolType.STT_GNU_IFUNC:
continue
except ValueError: # base class Symbol throws this, meaning we don't have an ELFSymbol, etc
continue
gotaddr = reloc.rebased_addr
gotvalue = self.project.loader.memory.unpack_word(gotaddr)
if self.project.is_hooked(gotvalue):
continue
# Replace it with a ifunc-resolve simprocedure!
kwargs = {
'funcaddr': gotvalue,
'gotaddr': gotaddr,
'funcname': reloc.symbol.name
}
# TODO: should this be replaced with hook_symbol?
randaddr = self.project.loader.extern_object.allocate()
self.project.hook(randaddr, P['linux_loader']['IFuncResolver'](**kwargs))
self.project.loader.memory.pack_word(gotaddr, randaddr)
# maybe move this into archinfo?
if self.arch.name == 'X86':
syscall_abis = ['i386']
elif self.arch.name == 'AMD64':
syscall_abis = ['i386', 'amd64']
elif self.arch.name.startswith('ARM'):
syscall_abis = ['arm']
if self.arch.name == 'ARMHF':
syscall_abis.append('armhf')
elif self.arch.name == 'AARCH64':
syscall_abis = ['aarch64']
# https://www.linux-mips.org/wiki/WhatsWrongWithO32N32N64
elif self.arch.name == 'MIPS32':
syscall_abis = ['mips-o32']
elif self.arch.name == 'MIPS64':
syscall_abis = ['mips-n32', 'mips-n64']
elif self.arch.name == 'PPC32':
syscall_abis = ['ppc']
elif self.arch.name == 'PPC64':
syscall_abis = ['ppc64']
else:
            syscall_abis = []  # unknown architecture: no syscall ABIs to register
super(SimLinux, self).configure_project(syscall_abis)
def syscall_abi(self, state):
if state.arch.name != 'AMD64':
return None
if state.history.jumpkind == 'Ijk_Sys_int128':
return 'i386'
elif state.history.jumpkind == 'Ijk_Sys_syscall':
return 'amd64'
else:
raise AngrSyscallError("Unknown syscall jumpkind %s" % state.history.jumpkind)
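        # Illustrative: on AMD64, a syscall reached via `int 0x80`
        # (Ijk_Sys_int128) is resolved against the i386 ABI table, while the
        # `syscall` instruction (Ijk_Sys_syscall) uses the amd64 table.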
# pylint: disable=arguments-differ
def state_blank(self, fs=None, concrete_fs=False, chroot=None,
cwd=b'/home/user', pathsep=b'/', **kwargs):
state = super(SimLinux, self).state_blank(**kwargs)
if self.project.loader.tls_object is not None:
if isinstance(state.arch, ArchAMD64):
state.regs.fs = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchX86):
state.regs.gs = self.project.loader.tls_object.user_thread_pointer >> 16
elif isinstance(state.arch, (ArchMIPS32, ArchMIPS64)):
state.regs.ulr = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC32):
state.regs.r2 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC64):
state.regs.r13 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchAArch64):
state.regs.tpidr_el0 = self.project.loader.tls_object.user_thread_pointer
if fs is None:
fs = {}
for name in fs:
if type(fs[name]) is str:
fs[name] = fs[name].encode('utf-8')
if type(fs[name]) is bytes:
fs[name] = claripy.BVV(fs[name])
if isinstance(fs[name], claripy.Bits):
fs[name] = SimFile(name, content=fs[name])
if not isinstance(fs[name], SimFileBase):
raise TypeError("Provided fs initializer with unusable type %r" % type(fs[name]))
mounts = {}
if concrete_fs:
mounts[pathsep] = SimHostFilesystem(chroot if chroot is not None else os.path.sep)
state.register_plugin('fs', SimFilesystem(files=fs, pathsep=pathsep, cwd=cwd, mountpoints=mounts))
if self.project.loader.main_object.is_ppc64_abiv1:
state.libc.ppc64_abiv = 'ppc64_1'
return state
def state_entry(self, args=None, env=None, argc=None, **kwargs):
state = super(SimLinux, self).state_entry(**kwargs)
# Handle default values
filename = self.project.filename or 'dummy_filename'
if args is None:
args = [filename]
if env is None:
env = {}
# Prepare argc
if argc is None:
argc = claripy.BVV(len(args), state.arch.bits)
elif type(argc) is int: # pylint: disable=unidiomatic-typecheck
argc = claripy.BVV(argc, state.arch.bits)
# Make string table for args/env/auxv
table = StringTableSpec()
# Add args to string table
table.append_args(args)
# Add environment to string table
table.append_env(env)
# Prepare the auxiliary vector and add it to the end of the string table
# TODO: Actually construct a real auxiliary vector
# current vector is an AT_RANDOM entry where the "random" value is 0xaec0aec0aec0...
aux = [(25, b"\xAE\xC0" * 8)]
for a, b in aux:
table.add_pointer(a)
if isinstance(b, bytes):
table.add_string(b)
else:
table.add_pointer(b)
table.add_null()
table.add_null()
# Dump the table onto the stack, calculate pointers to args, env, and auxv
state.memory.store(state.regs.sp - 16, claripy.BVV(0, 8 * 16))
argv = table.dump(state, state.regs.sp - 16)
envp = argv + ((len(args) + 1) * state.arch.bytes)
auxv = argv + ((len(args) + len(env) + 2) * state.arch.bytes)
# Put argc on stack and fix the stack pointer
newsp = argv - state.arch.bytes
state.memory.store(newsp, argc, endness=state.arch.memory_endness)
state.regs.sp = newsp
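        # Illustrative resulting layout at the new sp (low to high addresses):
        # argc | argv[0..n-1] | NULL | envp entries | NULL | AT_RANDOM auxv
        # pair | terminating NULLs, i.e. the layout a SysV _start expects.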
if state.arch.name in ('PPC32',):
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
# store argc argv envp auxv in the posix plugin
state.posix.argv = argv
state.posix.argc = argc
state.posix.environ = envp
state.posix.auxv = auxv
self.set_entry_register_values(state)
# set __progname
progname_full = 0
progname = 0
if args:
progname_full = state.mem[argv].long.concrete
progname_cur = progname_full
progname = progname_full
while True:
byte = state.mem[progname_cur].byte.resolved
if byte.symbolic:
break
else:
if state.solver.eval(byte) == ord('/'):
progname = progname_cur + 1
elif state.solver.eval(byte) == 0:
break
progname_cur += 1
# there will be multiple copies of these symbol but the canonical ones (in the main binary,
# or elsewhere if the main binary didn't have one) should get picked up here
for name, val in [
('__progname_full', progname_full),
('__progname', progname),
('__environ', envp),
('environ', envp),
('__libc_stack_end', state.regs.sp)]:
sym = self.project.loader.find_symbol(name)
if sym is not None:
if sym.size != self.arch.bytes:
_l.warning("Something is wrong with %s - bad size", name)
else:
state.mem[sym.rebased_addr].long = val
return state
def set_entry_register_values(self, state):
for reg, val in state.arch.entry_register_values.items():
if isinstance(val, int):
state.registers.store(reg, val)
elif isinstance(val, (str,)):
if val == 'argc':
state.registers.store(reg, state.posix.argc, size=state.arch.bytes)
elif val == 'argv':
state.registers.store(reg, state.posix.argv)
elif val == 'envp':
state.registers.store(reg, state.posix.environ)
elif val == 'auxv':
state.registers.store(reg, state.posix.auxv)
elif val == 'ld_destructor':
# a pointer to the dynamic linker's destructor routine, to be called at exit
# or NULL. We like NULL. It makes things easier.
state.registers.store(reg, 0)
elif val == 'toc':
if self.project.loader.main_object.is_ppc64_abiv1:
state.registers.store(reg, self.project.loader.main_object.ppc64_initial_rtoc)
elif val == 'thread_pointer':
state.registers.store(reg, self.project.loader.tls_object.user_thread_pointer)
else:
_l.warning('Unknown entry point register value indicator "%s"', val)
else:
_l.error('What the ass kind of default value is %s?', val)
def state_full_init(self, **kwargs):
kwargs['addr'] = self._loader_addr
return super(SimLinux, self).state_full_init(**kwargs)
def prepare_function_symbol(self, symbol_name, basic_addr=None):
"""
Prepare the address space with the data necessary to perform relocations pointing to the given symbol.
Returns a 2-tuple. The first item is the address of the function code, the second is the address of the
relocation target.
"""
if self.project.loader.main_object.is_ppc64_abiv1:
if basic_addr is not None:
pointer = self.project.loader.memory.unpack_word(basic_addr)
return pointer, basic_addr
pseudo_hookaddr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
pseudo_toc = self.project.loader.extern_object.allocate(size=0x18)
self.project.loader.extern_object.memory.pack_word(
AT.from_mva(pseudo_toc, self.project.loader.extern_object).to_rva(), pseudo_hookaddr)
return pseudo_hookaddr, pseudo_toc
else:
if basic_addr is None:
basic_addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
return basic_addr, basic_addr
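    # Illustrative: under the ppc64 ELFv1 ABI a function "address" is really a
    # descriptor (entry point plus TOC pointer), so the hook address and the
    # relocation target returned above differ; on every other ABI both values
    # are the same pseudo address.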
def initialize_segment_register_x64(self, state, concrete_target):
"""
Set the fs register in the angr to the value of the fs register in the concrete process
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return: None
"""
_l.debug("Synchronizing fs segment register")
state.regs.fs = self._read_fs_register_x64(concrete_target)
def initialize_gdt_x86(self,state,concrete_target):
"""
Create a GDT in the state memory and populate the segment registers.
Rehook the vsyscall address using the real value in the concrete process memory
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return:
"""
_l.debug("Creating fake Global Descriptor Table and synchronizing gs segment register")
gs = self._read_gs_register_x86(concrete_target)
gdt = self.generate_gdt(0x0, gs)
self.setup_gdt(state, gdt)
# Synchronize the address of vsyscall in simprocedures dictionary with the concrete value
        _vsyscall_address = concrete_target.read_memory(gs + 0x10, state.project.arch.bits // 8)
_vsyscall_address = struct.unpack(state.project.arch.struct_fmt(), _vsyscall_address)[0]
state.project.rehook_symbol(_vsyscall_address, '_vsyscall')
return gdt
@staticmethod
def _read_fs_register_x64(concrete_target):
        '''
        Injects a small shellcode to leak the fs segment register base address. On Linux x64 this address is stored at fs:[0].
        :param concrete_target: ConcreteTarget which will be used to get the fs register address
        :return: fs register address
        :rtype: str
        '''
# register used to read the value of the segment register
exfiltration_reg = "rax"
# instruction to inject for reading the value at segment value = offset
read_fs0_x64 = b"\x64\x48\x8B\x04\x25\x00\x00\x00\x00\x90\x90\x90\x90" # mov rax, fs:[0]
return concrete_target.execute_shellcode(read_fs0_x64, exfiltration_reg)
@staticmethod
def _read_gs_register_x86(concrete_target):
        '''
        Injects a small shellcode to leak the gs segment register base address. On Linux x86 this address is stored at gs:[0].
        :param concrete_target: ConcreteTarget which will be used to get the gs register address
        :return: gs register address
        :rtype: str
        '''
# register used to read the value of the segment register
exfiltration_reg = "eax"
# instruction to inject for reading the value at segment value = offset
read_gs0_x64 = b"\x65\xA1\x00\x00\x00\x00\x90\x90\x90\x90" # mov eax, gs:[0]
return concrete_target.execute_shellcode(read_gs0_x64, exfiltration_reg)
def get_segment_register_name(self):
if isinstance(self.arch, ArchAMD64):
for register in self.arch.register_list:
if register.name == 'fs':
return register.vex_offset
elif isinstance(self.arch, ArchX86):
for register in self.arch.register_list:
if register.name == 'gs':
return register.vex_offset
return None
|
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_netstream_template
version_added: "2.4"
short_description: Manages NetStream template configuration on HUAWEI CloudEngine switches.
description:
- Manages NetStream template configuration on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present', 'absent']
type:
description:
- Configure the type of netstream record.
required: true
choices: ['ip', 'vxlan']
record_name:
description:
- Configure the name of netstream record.
The value is a string of 1 to 32 case-insensitive characters.
match:
description:
- Configure flexible flow statistics template keywords.
choices: ['destination-address', 'destination-port', 'tos', 'protocol', 'source-address', 'source-port']
collect_counter:
description:
- Configure the number of packets and bytes that are included in the flexible flow statistics sent to NSC.
choices: ['bytes', 'packets']
collect_interface:
description:
- Configure the input or output interface that are included in the flexible flow statistics sent to NSC.
choices: ['input', 'output']
description:
description:
- Configure the description of netstream record.
The value is a string of 1 to 80 case-insensitive characters.
'''
EXAMPLES = '''
- name: netstream template module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Config ipv4 netstream record
ce_netstream_template:
state: present
type: ip
record_name: test
provider: "{{ cli }}"
- name: Undo ipv4 netstream record
ce_netstream_template:
state: absent
type: ip
record_name: test
provider: "{{ cli }}"
- name: Config ipv4 netstream record collect_counter
ce_netstream_template:
state: present
type: ip
record_name: test
collect_counter: bytes
provider: "{{ cli }}"
- name: Undo ipv4 netstream record collect_counter
ce_netstream_template:
state: absent
type: ip
record_name: test
collect_counter: bytes
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"record_name": "test",
"type": "ip",
"state": "present"}
existing:
    description: k/v pairs of existing netstream record
returned: always
type: dict
sample: {}
end_state:
    description: k/v pairs of netstream record after module execution
returned: always
type: dict
sample: {"record_name": "test",
"type": "ip"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["netstream record test ip"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
class NetstreamTemplate(object):
""" Manages netstream template configuration """
def __init__(self, **kwargs):
""" Netstream template module init """
# module
argument_spec = kwargs["argument_spec"]
self.spec = argument_spec
self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
# netstream config
self.netstream_cfg = None
# module args
self.state = self.module.params['state'] or None
self.type = self.module.params['type'] or None
self.record_name = self.module.params['record_name'] or None
self.match = self.module.params['match'] or None
self.collect_counter = self.module.params['collect_counter'] or None
self.collect_interface = self.module.params['collect_interface'] or None
self.description = self.module.params['description'] or None
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def cli_load_config(self, commands):
""" Cli load configuration """
if not self.module.check_mode:
load_config(self.module, commands)
def cli_get_netstream_config(self):
""" Cli get netstream configuration """
if self.type == "ip":
cmd = "netstream record %s ip" % self.record_name
else:
cmd = "netstream record %s vxlan inner-ip" % self.record_name
flags = list()
regular = "| section include %s" % cmd
flags.append(regular)
self.netstream_cfg = get_config(self.module, flags)
def check_args(self):
""" Check module args """
if not self.type or not self.record_name:
self.module.fail_json(
msg='Error: Please input type and record_name.')
if self.record_name:
if len(self.record_name) < 1 or len(self.record_name) > 32:
self.module.fail_json(
                    msg='Error: The length of record_name is out of the range [1 - 32].')
if self.description:
if len(self.description) < 1 or len(self.description) > 80:
self.module.fail_json(
                    msg='Error: The length of description is out of the range [1 - 80].')
def get_proposed(self):
""" Get module proposed """
self.proposed["state"] = self.state
if self.type:
self.proposed["type"] = self.type
if self.record_name:
self.proposed["record_name"] = self.record_name
if self.match:
self.proposed["match"] = self.match
if self.collect_counter:
self.proposed["collect_counter"] = self.collect_counter
if self.collect_interface:
self.proposed["collect_interface"] = self.collect_interface
if self.description:
self.proposed["description"] = self.description
def get_existing(self):
""" Get existing configuration """
self.cli_get_netstream_config()
if self.netstream_cfg:
self.existing["type"] = self.type
self.existing["record_name"] = self.record_name
if self.description:
tmp_value = re.findall(r'description (.*)', self.netstream_cfg)
if tmp_value:
self.existing["description"] = tmp_value[0]
if self.match:
if self.type == "ip":
tmp_value = re.findall(r'match ip (.*)', self.netstream_cfg)
else:
tmp_value = re.findall(r'match inner-ip (.*)', self.netstream_cfg)
if tmp_value:
self.existing["match"] = tmp_value
if self.collect_counter:
tmp_value = re.findall(r'collect counter (.*)', self.netstream_cfg)
if tmp_value:
self.existing["collect_counter"] = tmp_value
if self.collect_interface:
tmp_value = re.findall(r'collect interface (.*)', self.netstream_cfg)
if tmp_value:
self.existing["collect_interface"] = tmp_value
def get_end_state(self):
""" Get end state """
self.cli_get_netstream_config()
if self.netstream_cfg:
self.end_state["type"] = self.type
self.end_state["record_name"] = self.record_name
if self.description:
tmp_value = re.findall(r'description (.*)', self.netstream_cfg)
if tmp_value:
self.end_state["description"] = tmp_value[0]
if self.match:
if self.type == "ip":
tmp_value = re.findall(r'match ip (.*)', self.netstream_cfg)
else:
tmp_value = re.findall(r'match inner-ip (.*)', self.netstream_cfg)
if tmp_value:
self.end_state["match"] = tmp_value
if self.collect_counter:
tmp_value = re.findall(r'collect counter (.*)', self.netstream_cfg)
if tmp_value:
self.end_state["collect_counter"] = tmp_value
if self.collect_interface:
tmp_value = re.findall(r'collect interface (.*)', self.netstream_cfg)
if tmp_value:
self.end_state["collect_interface"] = tmp_value
def present_netstream(self):
""" Present netstream configuration """
cmds = list()
need_create_record = False
if self.type == "ip":
cmd = "netstream record %s ip" % self.record_name
else:
cmd = "netstream record %s vxlan inner-ip" % self.record_name
cmds.append(cmd)
if not self.netstream_cfg:
self.updates_cmd.append(cmd)
need_create_record = True
if self.description:
cmd = "description %s" % self.description
if not self.netstream_cfg or cmd not in self.netstream_cfg:
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.match:
if self.type == "ip":
cmd = "match ip %s" % self.match
cfg = "match ip"
else:
cmd = "match inner-ip %s" % self.match
cfg = "match inner-ip"
if not self.netstream_cfg or cfg not in self.netstream_cfg or self.match != self.existing["match"][0]:
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.collect_counter:
cmd = "collect counter %s" % self.collect_counter
if not self.netstream_cfg or cmd not in self.netstream_cfg:
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.collect_interface:
cmd = "collect interface %s" % self.collect_interface
if not self.netstream_cfg or cmd not in self.netstream_cfg:
cmds.append(cmd)
self.updates_cmd.append(cmd)
if not need_create_record and len(cmds) == 1:
if self.type == "ip":
cmd = "netstream record %s ip" % self.record_name
else:
cmd = "netstream record %s vxlan inner-ip" % self.record_name
cmds.remove(cmd)
if cmds:
self.cli_load_config(cmds)
self.changed = True
def absent_netstream(self):
""" Absent netstream configuration """
cmds = list()
absent_netstream_attr = False
if not self.netstream_cfg:
return
if self.description or self.match or self.collect_counter or self.collect_interface:
absent_netstream_attr = True
if absent_netstream_attr:
if self.type == "ip":
cmd = "netstream record %s ip" % self.record_name
else:
cmd = "netstream record %s vxlan inner-ip" % self.record_name
cmds.append(cmd)
if self.description:
cfg = "description %s" % self.description
if self.netstream_cfg and cfg in self.netstream_cfg:
cmd = "undo description %s" % self.description
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.match:
if self.type == "ip":
cfg = "match ip %s" % self.match
else:
cfg = "match inner-ip %s" % self.match
if self.netstream_cfg and cfg in self.netstream_cfg:
if self.type == "ip":
cmd = "undo match ip %s" % self.match
else:
cmd = "undo match inner-ip %s" % self.match
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.collect_counter:
cfg = "collect counter %s" % self.collect_counter
if self.netstream_cfg and cfg in self.netstream_cfg:
cmd = "undo collect counter %s" % self.collect_counter
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.collect_interface:
cfg = "collect interface %s" % self.collect_interface
if self.netstream_cfg and cfg in self.netstream_cfg:
cmd = "undo collect interface %s" % self.collect_interface
cmds.append(cmd)
self.updates_cmd.append(cmd)
if len(cmds) > 1:
self.cli_load_config(cmds)
self.changed = True
else:
if self.type == "ip":
cmd = "undo netstream record %s ip" % self.record_name
else:
cmd = "undo netstream record %s vxlan inner-ip" % self.record_name
cmds.append(cmd)
self.updates_cmd.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def work(self):
""" Work function """
self.check_args()
self.get_proposed()
self.get_existing()
if self.state == "present":
self.present_netstream()
else:
self.absent_netstream()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
self.results['updates'] = self.updates_cmd
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
type=dict(choices=['ip', 'vxlan'], required=True),
record_name=dict(type='str'),
match=dict(choices=['destination-address', 'destination-port',
'tos', 'protocol', 'source-address', 'source-port']),
collect_counter=dict(choices=['bytes', 'packets']),
collect_interface=dict(choices=['input', 'output']),
description=dict(type='str')
)
argument_spec.update(ce_argument_spec)
module = NetstreamTemplate(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
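# Illustrative sketch (not part of the module): how the regular expressions
# used in get_existing()/get_end_state() pull fields out of the text returned
# by get_config(). The sample configuration below is invented for the example
# and the helper is never called by the module itself.
def _demo_parse_netstream_record():
    sample_cfg = (
        "netstream record test ip\n"
        " description demo record\n"
        " match ip destination-address\n"
        " collect counter bytes\n"
    )
    description = re.findall(r'description (.*)', sample_cfg)   # ['demo record']
    match = re.findall(r'match ip (.*)', sample_cfg)            # ['destination-address']
    counter = re.findall(r'collect counter (.*)', sample_cfg)   # ['bytes']
    return description, match, counter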
|
|
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
from scapy.layers.l2 import Ether
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, Timer
from cocotbext.eth import XgmiiFrame, XgmiiSource, XgmiiSink
class TB:
def __init__(self, dut):
self.dut = dut
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.start_soon(Clock(dut.clk, 6.4, units="ns").start())
# Ethernet
self.eth_r0_source = XgmiiSource(dut.eth_r0_rxd, dut.eth_r0_rxc, dut.clk, dut.rst)
self.eth_r0_sink = XgmiiSink(dut.eth_r0_txd, dut.eth_r0_txc, dut.clk, dut.rst)
self.eth_r1_source = XgmiiSource(dut.eth_r1_rxd, dut.eth_r1_rxc, dut.clk, dut.rst)
self.eth_r1_sink = XgmiiSink(dut.eth_r1_txd, dut.eth_r1_txc, dut.clk, dut.rst)
self.eth_r2_source = XgmiiSource(dut.eth_r2_rxd, dut.eth_r2_rxc, dut.clk, dut.rst)
self.eth_r2_sink = XgmiiSink(dut.eth_r2_txd, dut.eth_r2_txc, dut.clk, dut.rst)
self.eth_r3_source = XgmiiSource(dut.eth_r3_rxd, dut.eth_r3_rxc, dut.clk, dut.rst)
self.eth_r3_sink = XgmiiSink(dut.eth_r3_txd, dut.eth_r3_txc, dut.clk, dut.rst)
self.eth_r4_source = XgmiiSource(dut.eth_r4_rxd, dut.eth_r4_rxc, dut.clk, dut.rst)
self.eth_r4_sink = XgmiiSink(dut.eth_r4_txd, dut.eth_r4_txc, dut.clk, dut.rst)
self.eth_r5_source = XgmiiSource(dut.eth_r5_rxd, dut.eth_r5_rxc, dut.clk, dut.rst)
self.eth_r5_sink = XgmiiSink(dut.eth_r5_txd, dut.eth_r5_txc, dut.clk, dut.rst)
self.eth_r6_source = XgmiiSource(dut.eth_r6_rxd, dut.eth_r6_rxc, dut.clk, dut.rst)
self.eth_r6_sink = XgmiiSink(dut.eth_r6_txd, dut.eth_r6_txc, dut.clk, dut.rst)
self.eth_r7_source = XgmiiSource(dut.eth_r7_rxd, dut.eth_r7_rxc, dut.clk, dut.rst)
self.eth_r7_sink = XgmiiSink(dut.eth_r7_txd, dut.eth_r7_txc, dut.clk, dut.rst)
self.eth_r8_source = XgmiiSource(dut.eth_r8_rxd, dut.eth_r8_rxc, dut.clk, dut.rst)
self.eth_r8_sink = XgmiiSink(dut.eth_r8_txd, dut.eth_r8_txc, dut.clk, dut.rst)
self.eth_r9_source = XgmiiSource(dut.eth_r9_rxd, dut.eth_r9_rxc, dut.clk, dut.rst)
self.eth_r9_sink = XgmiiSink(dut.eth_r9_txd, dut.eth_r9_txc, dut.clk, dut.rst)
self.eth_r10_source = XgmiiSource(dut.eth_r10_rxd, dut.eth_r10_rxc, dut.clk, dut.rst)
self.eth_r10_sink = XgmiiSink(dut.eth_r10_txd, dut.eth_r10_txc, dut.clk, dut.rst)
self.eth_r11_source = XgmiiSource(dut.eth_r11_rxd, dut.eth_r11_rxc, dut.clk, dut.rst)
self.eth_r11_sink = XgmiiSink(dut.eth_r11_txd, dut.eth_r11_txc, dut.clk, dut.rst)
self.eth_l0_source = XgmiiSource(dut.eth_l0_rxd, dut.eth_l0_rxc, dut.clk, dut.rst)
self.eth_l0_sink = XgmiiSink(dut.eth_l0_txd, dut.eth_l0_txc, dut.clk, dut.rst)
self.eth_l1_source = XgmiiSource(dut.eth_l1_rxd, dut.eth_l1_rxc, dut.clk, dut.rst)
self.eth_l1_sink = XgmiiSink(dut.eth_l1_txd, dut.eth_l1_txc, dut.clk, dut.rst)
self.eth_l2_source = XgmiiSource(dut.eth_l2_rxd, dut.eth_l2_rxc, dut.clk, dut.rst)
self.eth_l2_sink = XgmiiSink(dut.eth_l2_txd, dut.eth_l2_txc, dut.clk, dut.rst)
self.eth_l3_source = XgmiiSource(dut.eth_l3_rxd, dut.eth_l3_rxc, dut.clk, dut.rst)
self.eth_l3_sink = XgmiiSink(dut.eth_l3_txd, dut.eth_l3_txc, dut.clk, dut.rst)
self.eth_l4_source = XgmiiSource(dut.eth_l4_rxd, dut.eth_l4_rxc, dut.clk, dut.rst)
self.eth_l4_sink = XgmiiSink(dut.eth_l4_txd, dut.eth_l4_txc, dut.clk, dut.rst)
self.eth_l5_source = XgmiiSource(dut.eth_l5_rxd, dut.eth_l5_rxc, dut.clk, dut.rst)
self.eth_l5_sink = XgmiiSink(dut.eth_l5_txd, dut.eth_l5_txc, dut.clk, dut.rst)
self.eth_l6_source = XgmiiSource(dut.eth_l6_rxd, dut.eth_l6_rxc, dut.clk, dut.rst)
self.eth_l6_sink = XgmiiSink(dut.eth_l6_txd, dut.eth_l6_txc, dut.clk, dut.rst)
self.eth_l7_source = XgmiiSource(dut.eth_l7_rxd, dut.eth_l7_rxc, dut.clk, dut.rst)
self.eth_l7_sink = XgmiiSink(dut.eth_l7_txd, dut.eth_l7_txc, dut.clk, dut.rst)
self.eth_l8_source = XgmiiSource(dut.eth_l8_rxd, dut.eth_l8_rxc, dut.clk, dut.rst)
self.eth_l8_sink = XgmiiSink(dut.eth_l8_txd, dut.eth_l8_txc, dut.clk, dut.rst)
self.eth_l9_source = XgmiiSource(dut.eth_l9_rxd, dut.eth_l9_rxc, dut.clk, dut.rst)
self.eth_l9_sink = XgmiiSink(dut.eth_l9_txd, dut.eth_l9_txc, dut.clk, dut.rst)
self.eth_l10_source = XgmiiSource(dut.eth_l10_rxd, dut.eth_l10_rxc, dut.clk, dut.rst)
self.eth_l10_sink = XgmiiSink(dut.eth_l10_txd, dut.eth_l10_txc, dut.clk, dut.rst)
self.eth_l11_source = XgmiiSource(dut.eth_l11_rxd, dut.eth_l11_rxc, dut.clk, dut.rst)
self.eth_l11_sink = XgmiiSink(dut.eth_l11_txd, dut.eth_l11_txc, dut.clk, dut.rst)
dut.sw.setimmediatevalue(0)
dut.jp.setimmediatevalue(0)
dut.uart_suspend.setimmediatevalue(0)
dut.uart_dtr.setimmediatevalue(0)
dut.uart_txd.setimmediatevalue(0)
dut.uart_rts.setimmediatevalue(0)
dut.amh_right_mdio_i.setimmediatevalue(0)
dut.amh_left_mdio_i.setimmediatevalue(0)
async def init(self):
self.dut.rst.setimmediatevalue(0)
for k in range(10):
await RisingEdge(self.dut.clk)
        self.dut.rst.value = 1
        for k in range(10):
            await RisingEdge(self.dut.clk)
        self.dut.rst.value = 0
@cocotb.test()
async def run_test(dut):
tb = TB(dut)
await tb.init()
tb.log.info("send test packet")
payload = bytes([x % 256 for x in range(256)])
eth = Ether(src='5a:51:52:53:54:55', dst='02:00:00:00:00:00', type=0x8000)
test_pkt = eth / payload
test_frame = XgmiiFrame.from_payload(test_pkt.build())
await tb.eth_l0_source.send(test_frame)
rx_frame = await tb.eth_l0_sink.recv()
rx_pkt = Ether(bytes(rx_frame.get_payload()))
tb.log.info("RX packet: %s", repr(rx_pkt))
assert rx_pkt == test_pkt
tb.log.info("update configuration")
payload = bytes(range(15, -1, -1))
eth = Ether(src='5a:51:52:53:54:55', dst='02:00:00:00:00:00', type=0x8099)
test_pkt = eth / payload
test_frame = XgmiiFrame.from_payload(test_pkt.build())
await tb.eth_l11_source.send(test_frame)
await Timer(400, 'ns')
tb.log.info("send test packet")
payload = bytes([x % 256 for x in range(256)])
eth = Ether(src='5a:51:52:53:54:55', dst='02:00:00:00:00:00', type=0x8000)
test_pkt = eth / payload
test_frame = XgmiiFrame.from_payload(test_pkt.build())
await tb.eth_l0_source.send(test_frame)
rx_frame = await tb.eth_r7_sink.recv()
rx_pkt = Ether(bytes(rx_frame.get_payload()))
tb.log.info("RX packet: %s", repr(rx_pkt))
assert rx_pkt == test_pkt
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
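# Illustrative sketch (not part of the original testbench, and not registered
# as a cocotb test): a reusable helper for pushing several payload sizes
# through one source/sink pair, relying only on the XgmiiFrame/XgmiiSource/
# XgmiiSink and scapy APIs already used above. It assumes the chosen pair is
# actually connected by the switch configuration that is currently loaded.
async def _demo_size_sweep(tb, source, sink, sizes=(60, 128, 256)):
    for size in sizes:
        payload = bytes([x % 256 for x in range(size)])
        eth = Ether(src='5a:51:52:53:54:55', dst='02:00:00:00:00:00', type=0x8000)
        test_pkt = eth / payload
        await source.send(XgmiiFrame.from_payload(test_pkt.build()))
        rx_frame = await sink.recv()
        rx_pkt = Ether(bytes(rx_frame.get_payload()))
        tb.log.info("RX packet (%d byte payload): %s", size, repr(rx_pkt))
        assert rx_pkt == test_pkt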
# cocotb-test
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'lib', 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
def test_fpga_core(request):
dut = "fpga_core"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(eth_rtl_dir, "eth_mac_10g_fifo.v"),
os.path.join(eth_rtl_dir, "eth_mac_10g.v"),
os.path.join(eth_rtl_dir, "axis_xgmii_rx_64.v"),
os.path.join(eth_rtl_dir, "axis_xgmii_tx_64.v"),
os.path.join(eth_rtl_dir, "lfsr.v"),
os.path.join(eth_rtl_dir, "eth_axis_rx.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
os.path.join(axis_rtl_dir, "axis_crosspoint.v"),
]
parameters = {}
# parameters['A'] = val
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
|
|
"""
Abstract base class for the various polynomial Classes.
The ABCPolyBase class provides the methods needed to implement the common API
for the various polynomial classes. It operates as a mixin, but uses the
abc module from the stdlib, hence it is only available for Python >= 2.6.
"""
from __future__ import division, absolute_import, print_function
from abc import ABCMeta, abstractmethod, abstractproperty
from numbers import Number
import numpy as np
from . import polyutils as pu
__all__ = ['ABCPolyBase']
class ABCPolyBase(object):
"""An abstract base class for series classes.
ABCPolyBase provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the
methods listed below.
.. versionadded:: 1.9.0
Parameters
----------
coef : array_like
Series coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where
        ``P_i`` is the basis polynomial of degree ``i``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is the derived class domain.
window : (2,) array_like, optional
Window, see domain for its use. The default value is the
derived class window.
Attributes
----------
coef : (N,) ndarray
Series coefficients in order of increasing degree.
domain : (2,) ndarray
Domain that is mapped to window.
window : (2,) ndarray
Window that domain is mapped to.
Class Attributes
----------------
maxpower : int
Maximum power allowed, i.e., the largest number ``n`` such that
``p(x)**n`` is allowed. This is to limit runaway polynomial size.
domain : (2,) ndarray
Default domain of the class.
window : (2,) ndarray
Default window of the class.
"""
__metaclass__ = ABCMeta
# Not hashable
__hash__ = None
# Opt out of numpy ufuncs and Python ops with ndarray subclasses.
__array_ufunc__ = None
# Limit runaway size. T_n^m has degree n*m
maxpower = 100
@abstractproperty
def domain(self):
pass
@abstractproperty
def window(self):
pass
@abstractproperty
def nickname(self):
pass
@abstractmethod
def _add(self):
pass
@abstractmethod
def _sub(self):
pass
@abstractmethod
def _mul(self):
pass
@abstractmethod
def _div(self):
pass
@abstractmethod
def _pow(self):
pass
@abstractmethod
def _val(self):
pass
@abstractmethod
def _int(self):
pass
@abstractmethod
def _der(self):
pass
@abstractmethod
def _fit(self):
pass
@abstractmethod
def _line(self):
pass
@abstractmethod
def _roots(self):
pass
@abstractmethod
def _fromroots(self):
pass
def has_samecoef(self, other):
"""Check if coefficients match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``coef`` attribute.
Returns
-------
bool : boolean
True if the coefficients are the same, False otherwise.
"""
if len(self.coef) != len(other.coef):
return False
elif not np.all(self.coef == other.coef):
return False
else:
return True
def has_samedomain(self, other):
"""Check if domains match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``domain`` attribute.
Returns
-------
bool : boolean
True if the domains are the same, False otherwise.
"""
return np.all(self.domain == other.domain)
def has_samewindow(self, other):
"""Check if windows match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``window`` attribute.
Returns
-------
bool : boolean
True if the windows are the same, False otherwise.
"""
return np.all(self.window == other.window)
def has_sametype(self, other):
"""Check if types match.
.. versionadded:: 1.7.0
Parameters
----------
other : object
Class instance.
Returns
-------
bool : boolean
True if other is same class as self
"""
return isinstance(other, self.__class__)
def _get_coefficients(self, other):
"""Interpret other as polynomial coefficients.
The `other` argument is checked to see if it is of the same
class as self with identical domain and window. If so,
return its coefficients, otherwise return `other`.
.. versionadded:: 1.9.0
Parameters
----------
other : anything
Object to be checked.
Returns
-------
coef
            The coefficients of `other` if it is a compatible instance
            of ABCPolyBase, otherwise `other`.
Raises
------
TypeError
When `other` is an incompatible instance of ABCPolyBase.
"""
if isinstance(other, ABCPolyBase):
if not isinstance(other, self.__class__):
raise TypeError("Polynomial types differ")
elif not np.all(self.domain == other.domain):
raise TypeError("Domains differ")
elif not np.all(self.window == other.window):
raise TypeError("Windows differ")
return other.coef
return other
def __init__(self, coef, domain=None, window=None):
[coef] = pu.as_series([coef], trim=False)
self.coef = coef
if domain is not None:
[domain] = pu.as_series([domain], trim=False)
if len(domain) != 2:
raise ValueError("Domain has wrong number of elements.")
self.domain = domain
if window is not None:
[window] = pu.as_series([window], trim=False)
if len(window) != 2:
raise ValueError("Window has wrong number of elements.")
self.window = window
def __repr__(self):
format = "%s(%s, %s, %s)"
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
window = repr(self.window)[6:-1]
name = self.__class__.__name__
return format % (name, coef, domain, window)
def __str__(self):
format = "%s(%s)"
coef = str(self.coef)
name = self.nickname
return format % (name, coef)
# Pickle and copy
def __getstate__(self):
ret = self.__dict__.copy()
ret['coef'] = self.coef.copy()
ret['domain'] = self.domain.copy()
ret['window'] = self.window.copy()
return ret
def __setstate__(self, dict):
self.__dict__ = dict
# Call
def __call__(self, arg):
off, scl = pu.mapparms(self.domain, self.window)
arg = off + scl*arg
return self._val(arg, self.coef)
def __iter__(self):
return iter(self.coef)
def __len__(self):
return len(self.coef)
# Numeric properties.
def __neg__(self):
return self.__class__(-self.coef, self.domain, self.window)
def __pos__(self):
return self
def __add__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._add(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __sub__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._sub(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __mul__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._mul(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __div__(self, other):
# set to __floordiv__, /, for now.
return self.__floordiv__(other)
def __truediv__(self, other):
# there is no true divide if the rhs is not a Number, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if not isinstance(other, Number) or isinstance(other, bool):
form = "unsupported types for true division: '%s', '%s'"
raise TypeError(form % (type(self), type(other)))
return self.__floordiv__(other)
def __floordiv__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[0]
def __mod__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[1]
def __divmod__(self, other):
try:
othercoef = self._get_coefficients(other)
quo, rem = self._div(self.coef, othercoef)
except (TypeError, ZeroDivisionError) as e:
raise e
except:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
def __pow__(self, other):
coef = self._pow(self.coef, other, maxpower=self.maxpower)
res = self.__class__(coef, self.domain, self.window)
return res
def __radd__(self, other):
try:
coef = self._add(other, self.coef)
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rsub__(self, other):
try:
coef = self._sub(other, self.coef)
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rmul__(self, other):
try:
coef = self._mul(other, self.coef)
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rdiv__(self, other):
# set to __floordiv__ /.
return self.__rfloordiv__(other)
def __rtruediv__(self, other):
# An instance of ABCPolyBase is not considered a
# Number.
return NotImplemented
def __rfloordiv__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[0]
def __rmod__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[1]
def __rdivmod__(self, other):
try:
quo, rem = self._div(other, self.coef)
except ZeroDivisionError as e:
raise e
except:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
# Enhance me
# some augmented arithmetic operations could be added here
def __eq__(self, other):
res = (isinstance(other, self.__class__) and
np.all(self.domain == other.domain) and
np.all(self.window == other.window) and
(self.coef.shape == other.coef.shape) and
np.all(self.coef == other.coef))
return res
def __ne__(self, other):
return not self.__eq__(other)
#
# Extra methods.
#
def copy(self):
"""Return a copy.
Returns
-------
new_series : series
Copy of self.
"""
return self.__class__(self.coef, self.domain, self.window)
def degree(self):
"""The degree of the series.
.. versionadded:: 1.5.0
Returns
-------
degree : int
Degree of the series, one less than the number of coefficients.
"""
return len(self) - 1
def cutdeg(self, deg):
"""Truncate series to the given degree.
Reduce the degree of the series to `deg` by discarding the
high order terms. If `deg` is greater than the current degree a
copy of the current series is returned. This can be useful in least
squares where the coefficients of the high degree terms may be very
small.
.. versionadded:: 1.5.0
Parameters
----------
deg : non-negative int
The series is reduced to degree `deg` by discarding the high
order terms. The value of `deg` must be a non-negative integer.
Returns
-------
new_series : series
New instance of series with reduced degree.
"""
return self.truncate(deg + 1)
def trim(self, tol=0):
"""Remove trailing coefficients
Remove trailing coefficients until a coefficient is reached whose
        absolute value is greater than `tol` or the beginning of the series is
reached. If all the coefficients would be removed the series is set
to ``[0]``. A new series instance is returned with the new
coefficients. The current instance remains unchanged.
Parameters
----------
tol : non-negative number.
All trailing coefficients less than `tol` will be removed.
Returns
-------
new_series : series
Contains the new set of coefficients.
"""
coef = pu.trimcoef(self.coef, tol)
return self.__class__(coef, self.domain, self.window)
def truncate(self, size):
"""Truncate series to length `size`.
Reduce the series to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer. This
can be useful in least squares where the coefficients of the
high degree terms may be very small.
Parameters
----------
size : positive int
The series is reduced to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer.
Returns
-------
new_series : series
New instance of series with truncated coefficients.
"""
isize = int(size)
if isize != size or isize < 1:
raise ValueError("size must be a positive integer")
if isize >= len(self.coef):
coef = self.coef
else:
coef = self.coef[:isize]
return self.__class__(coef, self.domain, self.window)
def convert(self, domain=None, kind=None, window=None):
"""Convert series to a different kind and/or domain and/or window.
Parameters
----------
domain : array_like, optional
The domain of the converted series. If the value is None,
the default domain of `kind` is used.
kind : class, optional
The polynomial series type class to which the current instance
should be converted. If kind is None, then the class of the
current instance is used.
window : array_like, optional
The window of the converted series. If the value is None,
the default window of `kind` is used.
Returns
-------
new_series : series
The returned class can be of different type than the current
instance and/or have a different domain and/or different
window.
Notes
-----
Conversion between domains and class types can result in
        numerically ill-defined series.
Examples
--------
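        A minimal sketch (added for illustration; it assumes the Chebyshev
        and Polynomial subclasses provided by numpy.polynomial):
        >>> from numpy.polynomial import Chebyshev, Polynomial
        >>> cheb = Chebyshev([1, 2, 3])
        >>> poly = cheb.convert(kind=Polynomial)
        >>> # poly.coef is [-2., 2., 6.] because T_2(x) = 2*x**2 - 1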
"""
if kind is None:
kind = self.__class__
if domain is None:
domain = kind.domain
if window is None:
window = kind.window
return self(kind.identity(domain, window=window))
def mapparms(self):
"""Return the mapping parameters.
The returned values define a linear map ``off + scl*x`` that is
applied to the input arguments before the series is evaluated. The
map depends on the ``domain`` and ``window``; if the current
``domain`` is equal to the ``window`` the resulting map is the
identity. If the coefficients of the series instance are to be
used by themselves outside this class, then the linear function
must be substituted for the ``x`` in the standard representation of
the base polynomials.
Returns
-------
off, scl : float or complex
The mapping function is defined by ``off + scl*x``.
Notes
-----
If the current domain is the interval ``[l1, r1]`` and the window
is ``[l2, r2]``, then the linear mapping function ``L`` is
defined by the equations::
L(l1) = l2
L(r1) = r2
"""
return pu.mapparms(self.domain, self.window)
def integ(self, m=1, k=[], lbnd=None):
"""Integrate.
Return a series instance that is the definite integral of the
current series.
Parameters
----------
m : non-negative int
The number of integrations to perform.
k : array_like
Integration constants. The first constant is applied to the
first integration, the second to the second, and so on. The
            list of values must be less than or equal to `m` in length and any
missing values are set to zero.
lbnd : Scalar
The lower bound of the definite integral.
Returns
-------
new_series : series
A new series representing the integral. The domain is the same
as the domain of the integrated series.
"""
off, scl = self.mapparms()
if lbnd is None:
lbnd = 0
else:
lbnd = off + scl*lbnd
coef = self._int(self.coef, m, k, lbnd, 1./scl)
return self.__class__(coef, self.domain, self.window)
def deriv(self, m=1):
"""Differentiate.
        Return a series instance that is the derivative of the current
series.
Parameters
----------
m : non-negative int
Find the derivative of order `m`.
Returns
-------
new_series : series
A new series representing the derivative. The domain is the same
as the domain of the differentiated series.
"""
off, scl = self.mapparms()
coef = self._der(self.coef, m, scl)
return self.__class__(coef, self.domain, self.window)
def roots(self):
"""Return the roots of the series polynomial.
Compute the roots for the series. Note that the accuracy of the
        roots decreases the further outside the domain they lie.
Returns
-------
roots : ndarray
Array containing the roots of the series.
"""
roots = self._roots(self.coef)
return pu.mapdomain(roots, self.window, self.domain)
def linspace(self, n=100, domain=None):
"""Return x, y values at equally spaced points in domain.
Returns the x, y values at `n` linearly spaced points across the
domain. Here y is the value of the polynomial at the points x. By
default the domain is the same as that of the series instance.
This method is intended mostly as a plotting aid.
.. versionadded:: 1.5.0
Parameters
----------
n : int, optional
Number of point pairs to return. The default value is 100.
domain : {None, array_like}, optional
If not None, the specified domain is used instead of that of
the calling instance. It should be of the form ``[beg,end]``.
            The default is None, in which case the domain of the
            calling instance is used.
Returns
-------
x, y : ndarray
x is equal to linspace(self.domain[0], self.domain[1], n) and
            y is the series evaluated at each element of x.
"""
if domain is None:
domain = self.domain
x = np.linspace(domain[0], domain[1], n)
y = self(x)
return x, y
@classmethod
def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
window=None):
"""Least squares fit to data.
Return a series instance that is the least squares fit to the data
`y` sampled at `x`. The domain of the returned instance can be
specified and this will often result in a superior fit with less
chance of ill conditioning.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
domain : {None, [beg, end], []}, optional
Domain to use for the returned series. If ``None``,
then a minimal domain that covers the points `x` is chosen. If
``[]`` the class domain is used. The default value was the
class domain in NumPy 1.4 and ``None`` in later versions.
The ``[]`` option was added in numpy 1.5.0.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than this relative to the largest singular value will be
ignored. The default value is len(x)*eps, where eps is the
relative precision of the float type, about 2e-16 in most
cases.
full : bool, optional
Switch determining nature of return value. When it is False
(the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
Weights. If not None the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products
``w[i]*y[i]`` all have the same variance. The default value is
None.
.. versionadded:: 1.5.0
window : {[beg, end]}, optional
Window to use for the returned series. The default
            value is the default class window.
.. versionadded:: 1.6.0
Returns
-------
new_series : series
A series that represents the least squares fit to the data and
has the domain specified in the call.
[resid, rank, sv, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
"""
if domain is None:
domain = pu.getdomain(x)
elif type(domain) is list and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
xnew = pu.mapdomain(x, domain, window)
res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
if full:
[coef, status] = res
return cls(coef, domain=domain, window=window), status
else:
coef = res
return cls(coef, domain=domain, window=window)
@classmethod
def fromroots(cls, roots, domain=[], window=None):
"""Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
Returns
-------
new_series : series
Series with the specified roots.
"""
[roots] = pu.as_series([roots], trim=False)
if domain is None:
domain = pu.getdomain(roots)
elif type(domain) is list and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
deg = len(roots)
off, scl = pu.mapparms(domain, window)
rnew = off + scl*roots
coef = cls._fromroots(rnew) / scl**deg
return cls(coef, domain=domain, window=window)
@classmethod
def identity(cls, domain=None, window=None):
"""Identity function.
If ``p`` is the returned series, then ``p(x) == x`` for all
values of x.
Parameters
----------
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
            If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
            Series representing the identity.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
off, scl = pu.mapparms(window, domain)
coef = cls._line(off, scl)
return cls(coef, domain, window)
@classmethod
def basis(cls, deg, domain=None, window=None):
"""Series basis polynomial of degree `deg`.
Returns the series representing the basis polynomial of degree `deg`.
.. versionadded:: 1.7.0
Parameters
----------
deg : int
Degree of the basis polynomial for the series. Must be >= 0.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
            If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series with the coefficient of the `deg` term set to one and
all others zero.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
ideg = int(deg)
if ideg != deg or ideg < 0:
raise ValueError("deg must be non-negative integer")
return cls([0]*ideg + [1], domain, window)
@classmethod
def cast(cls, series, domain=None, window=None):
"""Convert series to series of this class.
The `series` is expected to be an instance of some polynomial
        series of one of the types supported by the numpy.polynomial
module, but could be some other class that supports the convert
method.
.. versionadded:: 1.7.0
Parameters
----------
series : series
The series instance to be converted.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
            If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series of the same kind as the calling class and equal to
`series` when evaluated.
See Also
--------
convert : similar instance method
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
return series.convert(domain, cls, window)
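# Illustrative sketch (not part of the original module): how the common API
# implemented by ABCPolyBase looks from a concrete subclass. The import is
# done lazily inside the function to avoid a circular import with
# numpy.polynomial, and the function is never called here.
def _demo_common_api():
    from numpy.polynomial import Polynomial
    x = np.linspace(0, 2, 21)
    y = 1 + 2 * x + 3 * x ** 2
    # fit() maps the data domain onto the class window before fitting.
    p = Polynomial.fit(x, y, deg=2)
    off, scl = p.mapparms()     # the linear map off + scl*x from domain to window
    xs, ys = p.linspace(5)      # sample points, mostly a plotting aid
    r = p.roots()               # roots mapped back into the domain
    return p.convert(), off, scl, xs, ys, r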
|
|
"""
Field classes.
"""
from __future__ import absolute_import, unicode_literals
import copy
import datetime
import os
import re
import sys
try:
from urllib.parse import urlsplit, urlunsplit
except ImportError: # Python 2
from urlparse import urlsplit, urlunsplit
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms.util import ErrorList, from_current_timezone, to_current_timezone
from django.forms.widgets import (
TextInput, NumberInput, EmailInput, URLInput, HiddenInput,
MultipleHiddenInput, ClearableFileInput, CheckboxInput, Select,
NullBooleanSelect, SelectMultiple, DateInput, DateTimeInput, TimeInput,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, FILE_INPUT_CONTRADICTION
)
from django.utils import formats
from django.utils.encoding import smart_text, force_str, force_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES
__all__ = (
'Field', 'CharField', 'IntegerField',
    'DateField', 'TimeField', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField'
)
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
# Add an 'invalid' entry to default_error_message if you want a specific
# field error message not raised by the field validators.
default_error_messages = {
'required': _('This field is required.'),
}
empty_values = list(validators.EMPTY_VALUES)
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text='', error_messages=None, show_hidden_initial=False,
validators=[], localize=False):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
        # validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
super(Field, self).__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages['required'])
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def _has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
        # the same as an empty string; if the data or initial value we get
        # is None, replace it with ''.
initial_value = initial if initial is not None else ''
try:
data = self.to_python(data)
except ValidationError:
return True
data_value = data if data is not None else ''
return initial_value != data_value
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.validators = self.validators[:]
return result
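# Illustrative sketch (not part of Django): how the keyword arguments
# documented in Field.__init__ combine. Written as an unused helper so that
# nothing runs at import time; the label and messages are invented for the
# example.
def _demo_field_kwargs():
    field = Field(
        required=False,
        label='Nickname',
        initial='anon',
        help_text='Shown next to the widget.',
        error_messages={'required': 'Please fill this in.'},
    )
    # clean() runs to_python(), validate() and run_validators() in order;
    # with required=False an empty value is returned unchanged.
    return field.clean('')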
class CharField(Field):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def widget_attrs(self, widget):
attrs = super(CharField, self).widget_attrs(widget)
if self.max_length is not None and isinstance(widget, TextInput):
# The HTML attribute is maxlength, not max_length.
attrs.update({'maxlength': str(self.max_length)})
return attrs
class IntegerField(Field):
default_error_messages = {
'invalid': _('Enter a whole number.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
kwargs.setdefault('widget', NumberInput if not kwargs.get('localize') else self.widget)
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = int(str(value))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
def widget_attrs(self, widget):
attrs = super(IntegerField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs['min'] = self.min_value
if self.max_value is not None:
attrs['max'] = self.max_value
return attrs
class FloatField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
def widget_attrs(self, widget):
attrs = super(FloatField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
attrs.setdefault('step', 'any')
return attrs
class DecimalField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
'max_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit in total.',
'Ensure that there are no more than %(max)s digits in total.',
'max'),
'max_decimal_places': ungettext_lazy(
'Ensure that there are no more than %(max)s decimal place.',
'Ensure that there are no more than %(max)s decimal places.',
'max'),
'max_whole_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit before the decimal point.',
'Ensure that there are no more than %(max)s digits before the decimal point.',
'max'),
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_text(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'])
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in self.empty_values:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'])
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(self.error_messages['max_digits'] % {
'max': self.max_digits})
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(self.error_messages['max_decimal_places'] % {
'max': self.decimal_places})
if (self.max_digits is not None and self.decimal_places is not None
and whole_digits > (self.max_digits - self.decimal_places)):
raise ValidationError(self.error_messages['max_whole_digits'] % {
'max': (self.max_digits - self.decimal_places)})
return value
def widget_attrs(self, widget):
attrs = super(DecimalField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.max_digits is not None:
max_length = self.max_digits + 1 # for the sign
if self.decimal_places is None or self.decimal_places > 0:
max_length += 1 # for the dot
attrs['maxlength'] = max_length
if self.decimal_places:
attrs['step'] = '0.%s1' % ('0' * (self.decimal_places-1))
return attrs
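# Illustrative sketch (not part of Django): the digit and decimal-place
# counting rules implemented in DecimalField.validate() above. Written as an
# unused helper; actually calling it assumes a configured Django environment.
def _demo_decimal_limits():
    field = DecimalField(max_digits=4, decimal_places=2)
    ok = field.clean('12.34')   # 4 digits, 2 decimal places -> Decimal('12.34')
    # field.clean('123.45') would raise ValidationError: 5 digits > max_digits=4.
    # field.clean('0.123') would raise ValidationError: 3 > decimal_places=2.
    return ok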
class BaseTemporalField(Field):
def __init__(self, input_formats=None, *args, **kwargs):
super(BaseTemporalField, self).__init__(*args, **kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
# Try to coerce the value to unicode.
unicode_value = force_text(value, strings_only=True)
if isinstance(unicode_value, six.text_type):
value = unicode_value.strip()
# If unicode, try to strptime against each input format.
if isinstance(value, six.text_type):
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages['invalid'])
def strptime(self, value, format):
raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date.'),
}
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super(DateField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid time.')
}
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super(TimeField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date/time.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
if isinstance(value, list):
# Input comes from a SplitDateTimeWidget, for example. So, it's two
# components: date and time.
if len(value) != 2:
raise ValidationError(self.error_messages['invalid'])
if value[0] in self.empty_values and value[1] in self.empty_values:
return None
value = '%s %s' % tuple(value)
result = super(DateTimeField, self).to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format)
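# Illustrative sketch (hypothetical helper; assumes a configured settings
# module with USE_TZ = False and the default DATETIME_INPUT_FORMATS): a
# two-element list, as produced by SplitDateTimeWidget, is joined into a
# single "date time" string before parsing.
def _sketch_datetime_from_split_widget():
    field = DateTimeField()
    value = field.clean(['2006-10-25', '14:30'])
    # value == datetime.datetime(2006, 10, 25, 14, 30)
    return value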
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
# error_message is just kept for backwards compatibility:
if error_message:
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
self._regex = regex
if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
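# Illustrative sketch (hypothetical helper): assigning to the `regex` property
# swaps the underlying RegexValidator, so validation follows the new pattern.
def _sketch_regexfield_property():
    field = RegexField(regex=r'^\d+$')
    field.clean('1234')        # passes: matches ^\d+$
    field.regex = r'^[a-z]+$'  # replaces the validator registered in __init__
    # field.clean('1234') would now raise ValidationError('Enter a valid value.')
    return field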
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _("No file was submitted. Check the encoding type on the form."),
'missing': _("No file was submitted."),
'empty': _("The submitted file is empty."),
'max_length': ungettext_lazy(
'Ensure this filename has at most %(max)d character (it has %(length)d).',
'Ensure this filename has at most %(max)d characters (it has %(length)d).',
'max'),
'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'])
if self.max_length is not None and len(file_name) > self.max_length:
error_values = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'] % error_values)
if not file_name:
raise ValidationError(self.error_messages['invalid'])
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages['empty'])
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'])
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
            # in self.empty_values; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
def _has_changed(self, initial, data):
if data is None:
return False
return True
class ImageField(FileField):
default_error_messages = {
'invalid_image': _("Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
from django.utils.image import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
# verify() must be called immediately after the constructor.
Image.open(file).verify()
except Exception:
# Pillow (or PIL) doesn't recognize it as an image.
six.reraise(ValidationError, ValidationError(self.error_messages['invalid_image']), sys.exc_info()[2])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
widget = URLInput
default_error_messages = {
'invalid': _('Enter a valid URL.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(URLField, self).__init__(max_length, min_length, *args, **kwargs)
self.validators.append(validators.URLValidator())
def to_python(self, value):
def split_url(url):
"""
            Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
            ``ValidationError`` exception for certain malformed URLs).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'])
value = super(URLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
if not url_fields[2]:
# the path portion may need to be added before query params
url_fields[2] = '/'
value = urlunsplit(url_fields)
return value
def clean(self, value):
value = self.to_python(value).strip()
return super(URLField, self).clean(value)
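# Illustrative sketch (hypothetical helper; assumes a configured settings
# module): to_python fills in a missing scheme and path before validation,
# so a bare domain normalizes to a full URL.
def _sketch_urlfield_normalization():
    field = URLField()
    value = field.clean('example.com')
    # value == 'http://example.com/'
    return value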
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super(BooleanField, self).to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages['required'])
def _has_changed(self, initial, data):
# Sometimes data or initial could be None or '' which should be the
# same thing as False.
if initial == 'False':
# show_hidden_initial may have transformed False to 'False'
initial = False
return bool(initial) != bool(data)
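# Illustrative sketch (hypothetical helper): the strings 'False' and '0'
# (submitted by hidden inputs and RadioSelect) are coerced to False; any other
# non-empty value becomes True.
def _sketch_booleanfield_strings():
    field = BooleanField(required=False)
    # ('False', '0', '1') clean to (False, False, True) respectively.
    return field.clean('False'), field.clean('0'), field.clean('1')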
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
        Explicitly checks for the strings 'True' and 'False', which is what a
        hidden field will submit for True and False, and for '1' and '0', which
        is what a RadioSelect will submit. Unlike BooleanField, we need to
        check for True explicitly because we are not using the bool() function.
"""
if value in (True, 'True', '1'):
return True
elif value in (False, 'False', '0'):
return False
else:
return None
def validate(self, value):
pass
def _has_changed(self, initial, data):
# None (unknown) and False (No) are not the same
if initial is not None:
initial = bool(initial)
if data is not None:
data = bool(data)
return initial != data
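# Illustrative sketch (hypothetical helper): unlike BooleanField, unrecognized
# values clean to None instead of being coerced with bool().
def _sketch_nullbooleanfield_values():
    field = NullBooleanField()
    # '1'/'True' -> True, '0'/'False' -> False, anything else -> None.
    return field.clean('1'), field.clean('0'), field.clean('maybe')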
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return True
else:
if value == k or text_value == force_text(k):
return True
return False
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
Validates that the value is in self.choices and can be coerced to the
right type.
"""
value = super(TypedChoiceField, self).to_python(value)
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
return value
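# Illustrative sketch (hypothetical helper): `coerce` converts the submitted
# string back to the type used in `choices`.
def _sketch_typedchoicefield_coerce():
    field = TypedChoiceField(choices=[(1, '+1'), (-1, '-1')], coerce=int)
    value = field.clean('1')
    # value == 1  (an int, not the string '1')
    return value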
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _('Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'])
return [smart_text(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'])
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': val})
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set([force_text(value) for value in initial])
data_set = set([force_text(value) for value in data])
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
value = super(TypedMultipleChoiceField, self).to_python(value)
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': choice})
return new_value
def validate(self, value):
if value != self.empty_value:
super(TypedMultipleChoiceField, self).validate(value)
elif self.required:
raise ValidationError(self.error_messages['required'])
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _('Enter a list of values.'),
}
def __init__(self, fields=(), *args, **kwargs):
super(MultiValueField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = ErrorList()
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'])
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'])
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if self.required and field_value in self.empty_values:
raise ValidationError(self.error_messages['required'])
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter.
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _has_changed(self, initial, data):
if initial is None:
initial = ['' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
if field._has_changed(initial, data):
return True
return False
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in dirs:
if f == '__pycache__':
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
if f == '__pycache__':
continue
full_file = os.path.join(self.path, f)
if (((self.allow_files and os.path.isfile(full_file)) or
(self.allow_folders and os.path.isdir(full_file))) and
(self.match is None or self.match_re.search(f))):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _('Enter a valid date.'),
'invalid_time': _('Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'])
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_time'])
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
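# Illustrative sketch (hypothetical helper; assumes a configured settings
# module with USE_TZ = False and the default date/time input formats): the
# two cleaned components are compressed into a single datetime.
def _sketch_splitdatetime_compress():
    field = SplitDateTimeField()
    value = field.clean(['2006-01-10', '07:30'])
    # value == datetime.datetime(2006, 1, 10, 7, 30)
    return value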
class IPAddressField(CharField):
default_validators = [validators.validate_ipv4_address]
def to_python(self, value):
if value in self.empty_values:
return ''
return value.strip()
class GenericIPAddressField(CharField):
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
super(GenericIPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
value = value.strip()
if value and ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
class SlugField(CharField):
default_validators = [validators.validate_slug]
def clean(self, value):
value = self.to_python(value).strip()
return super(SlugField, self).clean(value)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Glance exception subclasses"""
import urlparse
class RedirectException(Exception):
def __init__(self, url):
self.url = urlparse.urlparse(url)
class GlanceException(Exception):
"""
Base Glance Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred")
def __init__(self, message=None, *args, **kwargs):
if not message:
message = self.message
try:
message = message % kwargs
except Exception:
# at least get the core message out if something happened
pass
super(GlanceException, self).__init__(message)
class MissingArgumentError(GlanceException):
message = _("Missing required argument.")
class MissingCredentialError(GlanceException):
message = _("Missing required credential: %(required)s")
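# Illustrative sketch (hypothetical helper): keyword arguments passed to a
# GlanceException subclass are interpolated into its `message` template.
def _sketch_exception_message():
    exc = MissingCredentialError(required="password")
    # str(exc) -> 'Missing required credential: password'
    return exc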
class BadAuthStrategy(GlanceException):
message = _("Incorrect auth strategy, expected \"%(expected)s\" but "
"received \"%(received)s\"")
class NotFound(GlanceException):
message = _("An object with the specified identifier was not found.")
class UnknownScheme(GlanceException):
message = _("Unknown scheme '%(scheme)s' found in URI")
class BadStoreUri(GlanceException):
message = _("The Store URI was malformed.")
class Duplicate(GlanceException):
message = _("An object with the same identifier already exists.")
class StorageFull(GlanceException):
message = _("There is not enough disk space on the image storage media.")
class StorageWriteDenied(GlanceException):
message = _("Permission to write image storage media denied.")
class AuthBadRequest(GlanceException):
message = _("Connect error/bad request to Auth service at URL %(url)s.")
class AuthUrlNotFound(GlanceException):
message = _("Auth service at URL %(url)s not found.")
class AuthorizationFailure(GlanceException):
message = _("Authorization failed.")
class NotAuthenticated(GlanceException):
message = _("You are not authenticated.")
class Forbidden(GlanceException):
message = _("You are not authorized to complete this action.")
class ForbiddenPublicImage(Forbidden):
message = _("You are not authorized to complete this action.")
#NOTE(bcwaldon): here for backwards-compatibility, need to deprecate.
class NotAuthorized(Forbidden):
message = _("You are not authorized to complete this action.")
class Invalid(GlanceException):
message = _("Data supplied was not valid.")
class InvalidSortKey(Invalid):
message = _("Sort key supplied was not valid.")
class InvalidFilterRangeValue(Invalid):
message = _("Unable to filter using the specified range.")
class AuthorizationRedirect(GlanceException):
message = _("Redirecting to %(uri)s for authorization.")
class DatabaseMigrationError(GlanceException):
message = _("There was an error migrating the database.")
class ClientConnectionError(GlanceException):
message = _("There was an error connecting to a server")
class ClientConfigurationError(GlanceException):
message = _("There was an error configuring the client.")
class MultipleChoices(GlanceException):
message = _("The request returned a 302 Multiple Choices. This generally "
"means that you have not included a version indicator in a "
"request URI.\n\nThe body of response returned:\n%(body)s")
class LimitExceeded(GlanceException):
message = _("The request returned a 413 Request Entity Too Large. This "
"generally means that rate limiting or a quota threshold was "
"breached.\n\nThe response body:\n%(body)s")
def __init__(self, *args, **kwargs):
self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
else None)
super(LimitExceeded, self).__init__(*args, **kwargs)
class ServiceUnavailable(GlanceException):
message = _("The request returned 503 Service Unavilable. This "
"generally occurs on service overload or other transient "
"outage.")
def __init__(self, *args, **kwargs):
self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
else None)
super(ServiceUnavailable, self).__init__(*args, **kwargs)
class ServerError(GlanceException):
message = _("The request returned 500 Internal Server Error.")
class UnexpectedStatus(GlanceException):
message = _("The request returned an unexpected status: %(status)s."
"\n\nThe response body:\n%(body)s")
class InvalidContentType(GlanceException):
message = _("Invalid content type %(content_type)s")
class BadRegistryConnectionConfiguration(GlanceException):
message = _("Registry was not configured correctly on API server. "
"Reason: %(reason)s")
class BadStoreConfiguration(GlanceException):
message = _("Store %(store_name)s could not be configured correctly. "
"Reason: %(reason)s")
class BadDriverConfiguration(GlanceException):
message = _("Driver %(driver_name)s could not be configured correctly. "
"Reason: %(reason)s")
class StoreDeleteNotSupported(GlanceException):
message = _("Deleting images from this store is not supported.")
class StoreAddDisabled(GlanceException):
message = _("Configuration for store failed. Adding images to this "
"store is disabled.")
class InvalidNotifierStrategy(GlanceException):
message = _("'%(strategy)s' is not an available notifier strategy.")
class MaxRedirectsExceeded(GlanceException):
message = _("Maximum redirects (%(redirects)s) was exceeded.")
class InvalidRedirect(GlanceException):
message = _("Received invalid HTTP redirect.")
class NoServiceEndpoint(GlanceException):
message = _("Response from Keystone does not contain a Glance endpoint.")
class RegionAmbiguity(GlanceException):
message = _("Multiple 'image' service matches for region %(region)s. This "
"generally means that a region is required and you have not "
"supplied one.")
class WorkerCreationFailure(GlanceException):
message = _("Server worker creation failed: %(reason)s.")
class SchemaLoadError(GlanceException):
message = _("Unable to load schema: %(reason)s")
class InvalidObject(GlanceException):
message = _("Provided object does not match schema "
"'%(schema)s': %(reason)s")
class UnsupportedHeaderFeature(GlanceException):
message = _("Provided header feature is unsupported: %(feature)s")
class InUseByStore(GlanceException):
message = _("The image cannot be deleted because it is in use through "
"the backend store outside of Glance.")
class ImageSizeLimitExceeded(GlanceException):
message = _("The provided image is too large.")
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
_VARIABLE_OPS = {
"Assign",
"AssignAdd",
"AssignSub",
"Queue",
"ScatterAdd",
"ScatterSub",
"ScatterUpdate",
"TruncatedNormal",
"Variable",
"VariableV2",
}
_CONTROL_FLOW_OP_NAMES_OR_IDENTITY = [
"Switch",
"Enter",
"Exit",
"Identity",
"Merge",
"NextIteration",
]
def _is_variable_op(op):
"""Returns true if 'op' refers to a Variable node."""
return op in _VARIABLE_OPS
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.must_run_on_cpu`")
@tf_export(v1=["graph_util.must_run_on_cpu"])
def must_run_on_cpu(node, pin_variables_on_cpu=False):
"""Returns True if the given node_def must run on CPU, otherwise False.
Args:
node: The node to be assigned to a device. Could be either an ops.Operation
or NodeDef.
pin_variables_on_cpu: If True, this function will return False if node_def
represents a variable-related op.
Returns:
True if the given node must run on CPU, otherwise False.
"""
if isinstance(node, ops.Operation):
node_def = node.node_def
else:
assert isinstance(node, node_def_pb2.NodeDef)
node_def = node
# If the op is a variable-related op, should we pin it on CPU?
if pin_variables_on_cpu and _is_variable_op(node_def.op):
return True
# Constant operations producing a string or int32 must run on CPU.
if node_def.op == "Const":
# Get the value of the 'dtype' attr
dtype = node_def.attr["dtype"].type
if dtype == dtypes.string or dtype == dtypes.int32:
return True
if node_def.op in ["DynamicStitch", "ParallelDynamicStitch"]:
dtype = node_def.attr["T"].type
if dtype == dtypes.int32:
# DynamicStitch on GPU only works for int32 values.
return True
if node_def.op in ["Cast"]:
dtype = node_def.attr["SrcT"].type
if dtype == dtypes.int32:
      # Cast on GPU does not work for int32 values.
return True
return False
################################################################################
#
# device functions for use in with g.device(...)
#
################################################################################
def _node_name(n):
if n.startswith("^"):
return n[1:]
else:
return n.split(":")[0]
def _extract_graph_summary(graph_def):
"""Extracts useful information from the graph and returns them."""
name_to_input_name = {} # Keyed by the dest node name.
name_to_node = {} # Keyed by node name.
# Keeps track of node sequences. It is important to still output the
# operations in the original order.
name_to_seq_num = {} # Keyed by node name.
seq = 0
for node in graph_def.node:
n = _node_name(node.name)
name_to_node[n] = node
name_to_input_name[n] = [_node_name(x) for x in node.input]
# Prevent colocated nodes from being lost.
if "_class" in node.attr:
for colocated_node_name in node.attr["_class"].list.s:
colocated_node_decoded = colocated_node_name.decode("utf-8")
if colocated_node_decoded.startswith("loc:@"):
name_to_input_name[n].append(colocated_node_decoded[5:])
name_to_seq_num[n] = seq
seq += 1
return name_to_input_name, name_to_node, name_to_seq_num
def _assert_nodes_are_present(name_to_node, nodes):
"""Assert that nodes are present in the graph."""
for d in nodes:
assert d in name_to_node, "%s is not in graph" % d
def _bfs_for_reachable_nodes(target_nodes, name_to_input_name):
"""Breadth first search for reachable nodes from target nodes."""
nodes_to_keep = set()
# Breadth first search to find all the nodes that we should keep.
next_to_visit = target_nodes[:]
while next_to_visit:
node = next_to_visit[0]
del next_to_visit[0]
if node in nodes_to_keep:
# Already visited this node.
continue
nodes_to_keep.add(node)
if node in name_to_input_name:
next_to_visit += name_to_input_name[node]
return nodes_to_keep
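# Illustrative sketch (hypothetical helper): reachability over a plain
# name -> inputs mapping, independent of any real GraphDef.
def _sketch_bfs_reachability():
    name_to_input_name = {"c": ["a", "b"], "b": ["a"], "a": [], "d": ["c"]}
    # Starting from "c" keeps {"a", "b", "c"}; "d" is not reachable from "c".
    return _bfs_for_reachable_nodes(["c"], name_to_input_name)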
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.extract_sub_graph`")
@tf_export(v1=["graph_util.extract_sub_graph"])
def extract_sub_graph(graph_def, dest_nodes):
"""Extract the subgraph that can reach any of the nodes in 'dest_nodes'.
Args:
graph_def: A graph_pb2.GraphDef proto.
dest_nodes: A list of strings specifying the destination node names.
Returns:
The GraphDef of the sub-graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
if isinstance(dest_nodes, six.string_types):
raise TypeError("dest_nodes must be a list.")
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
graph_def)
_assert_nodes_are_present(name_to_node, dest_nodes)
nodes_to_keep = _bfs_for_reachable_nodes(dest_nodes, name_to_input_name)
nodes_to_keep_list = sorted(
list(nodes_to_keep), key=lambda n: name_to_seq_num[n])
# Now construct the output GraphDef
out = graph_pb2.GraphDef()
for n in nodes_to_keep_list:
out.node.extend([copy.deepcopy(name_to_node[n])])
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
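# Illustrative sketch (hypothetical helper; assumes the tf.compat.v1 API and
# graph mode): prune a GraphDef down to the nodes needed for one output.
def _sketch_extract_sub_graph():
    import tensorflow.compat.v1 as tf
    g = tf.Graph()
    with g.as_default():
        a = tf.constant(1.0, name="a")
        b = tf.constant(2.0, name="b")
        tf.add(a, b, name="c")
        tf.multiply(a, 3.0, name="unused")  # not reachable from "c"
    sub = extract_sub_graph(g.as_graph_def(), ["c"])
    # sub.node contains only the "a", "b" and "c" nodes.
    return sub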
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.tensor_shape_from_node_def_name`"
)
@tf_export(v1=["graph_util.tensor_shape_from_node_def_name"])
def tensor_shape_from_node_def_name(graph, input_name):
"""Convenience function to get a shape from a NodeDef's input string."""
# To get a tensor, the name must be in the form <input>:<port>, for example
# 'Mul:0'. The GraphDef input strings don't always have the port specified
# though, so if there isn't a colon we need to add a default ':0' to the end.
if ":" not in input_name:
canonical_name = input_name + ":0"
else:
canonical_name = input_name
tensor = graph.get_tensor_by_name(canonical_name)
shape = tensor.get_shape()
return shape
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.convert_variables_to_constants`")
@tf_export(v1=["graph_util.convert_variables_to_constants"])
def convert_variables_to_constants(sess,
input_graph_def,
output_node_names,
variable_names_whitelist=None,
variable_names_blacklist=None):
"""Replaces all the variables in a graph with constants of the same values.
If you have a trained graph containing Variable ops, it can be convenient to
convert them all to Const ops holding the same values. This makes it possible
to describe the network fully with a single GraphDef file, and allows the
removal of a lot of ops related to loading and saving the variables.
Args:
sess: Active TensorFlow session containing the variables.
input_graph_def: GraphDef object holding the network.
output_node_names: List of name strings for the result nodes of the graph.
variable_names_whitelist: The set of variable names to convert (by default,
all variables are converted).
variable_names_blacklist: The set of variable names to omit converting
to constants.
Returns:
GraphDef containing a simplified version of the original.
"""
get_input_name = lambda node, index=0: node.input[index].split(":")[0]
def create_const_op(node_name, dtype, data, data_shape=None):
"""Creates a Const op."""
output_node = node_def_pb2.NodeDef()
output_node.op = "Const"
output_node.name = node_name
output_node.attr["dtype"].CopyFrom(dtype)
output_node.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(
tensor=tensor_util.make_tensor_proto(
data, dtype=dtype.type, shape=data_shape)))
return output_node
# This graph only includes the nodes needed to evaluate the output nodes, and
# removes unneeded nodes like those involved in saving and assignment.
inference_graph = extract_sub_graph(input_graph_def, output_node_names)
# Identify the ops in the graph.
map_name_to_node = {
node.name: node for node in inference_graph.node
}
# Get list of variables.
variable_names = []
variable_dict_names = []
resource_op_types = {}
for node in inference_graph.node:
if node.op in ["Variable", "VariableV2", "VarHandleOp"]:
variable_name = node.name
if ((variable_names_whitelist is not None and
variable_name not in variable_names_whitelist) or
(variable_names_blacklist is not None and
variable_name in variable_names_blacklist)):
continue
variable_dict_names.append(variable_name)
if node.op == "VarHandleOp":
variable_names.append(variable_name + "/Read/ReadVariableOp:0")
else:
variable_names.append(variable_name + ":0")
elif node.op in ["ReadVariableOp", "ResourceGather"]:
# There can be one or more Identity or control flow ops in between the
# ReadVariableOp and VarHandleOp. Store the ops with the associated
# dtypes.
source_op_names = [get_input_name(node)]
while (source_op_names and map_name_to_node[source_op_names[0]].op in
_CONTROL_FLOW_OP_NAMES_OR_IDENTITY):
source_op_name = source_op_names.pop()
if source_op_name not in resource_op_types:
resource_op_types[source_op_name] = node.attr["dtype"]
source_op_names.append(
get_input_name(map_name_to_node[source_op_name]))
if map_name_to_node[source_op_name].op == "Merge":
merge_resource_name = get_input_name(
map_name_to_node[source_op_name], index=1)
if merge_resource_name not in resource_op_types:
resource_op_types[merge_resource_name] = node.attr["dtype"]
source_op_names.append(
get_input_name(map_name_to_node[merge_resource_name]))
for source_node in source_op_names:
if map_name_to_node[source_node].op != "VarHandleOp":
raise ValueError("Cannot find the variable that is an input "
"to the ReadVariableOp.")
# Gets map of variables and the associated data.
if variable_names:
returned_variables = sess.run(variable_names)
else:
returned_variables = []
variables_data_map = dict(zip(variable_dict_names, returned_variables))
logging.info("Froze %d variables.", len(returned_variables))
# Reconstruct the graph with constants in place of variables.
output_graph_def = graph_pb2.GraphDef()
how_many_converted = 0
for input_node in inference_graph.node:
output_node = node_def_pb2.NodeDef()
if input_node.name in variables_data_map:
data = variables_data_map[input_node.name]
output_node = create_const_op(input_node.name, input_node.attr["dtype"],
data, data.shape)
how_many_converted += 1
elif input_node.name in resource_op_types:
# Converts the type of the ops between the ReadVariableOp and VarHandleOp
# from RESOURCE_DT to the appropriate type based on the input they are
# referencing. Do not copy shapes due to incorrect shape info.
output_node.op = input_node.op
output_node.name = input_node.name
for in_node in input_node.input:
output_node.input.append(in_node)
for attr_name in input_node.attr:
if str(attr_name) != "_output_shapes":
output_node.attr[attr_name].CopyFrom(input_node.attr[attr_name])
output_node.attr["T"].CopyFrom(resource_op_types[input_node.name])
elif input_node.op == "ReadVariableOp":
# The first branch converts all VarHandleOps of ResourceVariables to
# constants, so we need to convert the associated ReadVariableOps to
# Identity ops.
output_node.op = "Identity"
output_node.name = input_node.name
output_node.input.extend([input_node.input[0]])
output_node.attr["T"].CopyFrom(input_node.attr["dtype"])
if "_class" in input_node.attr:
output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
elif input_node.op == "ResourceGather":
# The first branch converts all VarHandleOps of ResourceGather to
# constants, so we need to convert the associated ResourceGather to Gather
# ops with a Const axis feeding into it.
if input_node.attr["batch_dims"].i != 0:
raise ValueError("batch_dims != 0 is not supported by freeze_graph.")
axis_data = input_node.attr["batch_dims"].i
axis_node_name = input_node.name + "/axis"
axis_dtype = input_node.attr["Tindices"]
output_axis_node = create_const_op(axis_node_name, axis_dtype, axis_data)
output_graph_def.node.extend([output_axis_node])
output_node.op = "GatherV2"
output_node.name = input_node.name
output_node.input.extend(
[input_node.input[0], input_node.input[1], axis_node_name])
output_node.attr["Tparams"].CopyFrom(input_node.attr["dtype"])
output_node.attr["Tindices"].CopyFrom(input_node.attr["Tindices"])
output_node.attr["Taxis"].CopyFrom(axis_dtype)
if "_class" in input_node.attr:
output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
else:
output_node.CopyFrom(input_node)
output_graph_def.node.extend([output_node])
output_graph_def.library.CopyFrom(inference_graph.library)
logging.info("Converted %d variables to const ops.", how_many_converted)
return output_graph_def
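# Illustrative sketch (hypothetical helper; assumes the tf.compat.v1 API, graph
# mode and a live Session): freeze a single variable into a Const node.
def _sketch_convert_variables_to_constants():
    import tensorflow.compat.v1 as tf
    g = tf.Graph()
    with g.as_default():
        v = tf.Variable(3.0, name="v")
        tf.multiply(v, 2.0, name="out")
        init = tf.global_variables_initializer()
    with tf.Session(graph=g) as sess:
        sess.run(init)
        frozen = convert_variables_to_constants(
            sess, g.as_graph_def(), output_node_names=["out"])
    # In `frozen`, "v" is a Const node; no Variable/Assign ops remain.
    return frozen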
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.remove_training_nodes`")
@tf_export(v1=["graph_util.remove_training_nodes"])
def remove_training_nodes(input_graph, protected_nodes=None):
"""Prunes out nodes that aren't needed for inference.
There are nodes like Identity and CheckNumerics that are only useful
during training, and can be removed in graphs that will be used for
nothing but inference. Here we identify and remove them, returning an
equivalent graph. To be specific, CheckNumerics nodes are always removed, and
Identity nodes that aren't involved in control edges are spliced out so that
their input and outputs are directly connected.
Args:
input_graph: Model to analyze and prune.
protected_nodes: An optional list of names of nodes to be kept
unconditionally. This is for example useful to preserve Identity output
nodes.
Returns:
A list of nodes with the unnecessary ones removed.
"""
if not protected_nodes:
protected_nodes = []
types_to_remove = {"CheckNumerics": True}
input_nodes = input_graph.node
names_to_remove = {}
for node in input_nodes:
if node.op in types_to_remove and node.name not in protected_nodes:
names_to_remove[node.name] = True
nodes_after_removal = []
for node in input_nodes:
if node.name in names_to_remove:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
if input_name in names_to_remove:
continue
new_node.input.append(full_input_name)
nodes_after_removal.append(new_node)
types_to_splice = {"Identity": True}
control_input_names = set()
node_names_with_control_input = set()
for node in nodes_after_removal:
for node_input in node.input:
if "^" in node_input:
control_input_names.add(node_input.replace("^", ""))
node_names_with_control_input.add(node.name)
names_to_splice = {}
for node in nodes_after_removal:
if node.op in types_to_splice and node.name not in protected_nodes:
# We don't want to remove nodes that have control edge inputs, because
# they might be involved in subtle dependency issues that removing them
# will jeopardize.
if node.name not in node_names_with_control_input:
names_to_splice[node.name] = node.input[0]
# We also don't want to remove nodes which are used as control edge inputs.
names_to_splice = {name: value for name, value in names_to_splice.items()
if name not in control_input_names}
nodes_after_splicing = []
for node in nodes_after_removal:
if node.name in names_to_splice:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
while input_name in names_to_splice:
full_input_name = names_to_splice[input_name]
input_name = re.sub(r"^\^", "", full_input_name)
new_node.input.append(full_input_name)
nodes_after_splicing.append(new_node)
output_graph = graph_pb2.GraphDef()
output_graph.node.extend(nodes_after_splicing)
return output_graph
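# Illustrative sketch (hypothetical helper): a frozen GraphDef is typically
# passed through remove_training_nodes afterwards, protecting named outputs
# so they are not spliced away.
def _sketch_remove_training_nodes(frozen_graph_def):
    return remove_training_nodes(frozen_graph_def, protected_nodes=["out"])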
# -*- coding: UTF-8 -*-
from conference import cachef
from conference import dataaccess as cdata
from conference import models as cmodels
from assopy import models as amodels
from assopy import utils as autils
from p3 import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
cache_me = cachef.CacheFunction(prefix='p3:')
def profile_data(uid, preload=None):
if preload is None:
preload = {}
profile = cdata.profile_data(uid)
try:
p3p = preload['profile']
except KeyError:
try:
p3p = models.P3Profile.objects\
.select_related('profile__user')\
.get(profile=uid)
except models.P3Profile.DoesNotExist:
p3p = None
if p3p:
try:
interests = preload['interests']
except KeyError:
interests = [ t.name for t in p3p.interests.all() ]
profile.update({
'tagline': p3p.tagline,
'interests': interests,
'twitter': p3p.twitter,
'country': p3p.country,
'image': p3p.public_profile_image_url(),
'image_gravatar': p3p.image_gravatar,
'image_url': p3p.image_url,
'spam_recruiting': p3p.spam_recruiting,
'spam_user_message': p3p.spam_user_message,
'spam_sms': p3p.spam_sms,
})
if profile['talks']:
try:
spk = preload['speaker']
except KeyError:
try:
spk = models.SpeakerConference.objects.get(speaker=uid)
except models.SpeakerConference.DoesNotExist:
spk = None
if spk:
profile.update({
'first_time_speaker': spk.first_time,
})
return profile
def _i_profile_data(sender, **kw):
# invalidation signal is handled by cachef
return 'profile:%s' % (kw['instance'].profile_id,)
profile_data = cache_me(
signals=(cdata.profile_data.invalidated,),
models=(models.P3Profile,),
key='profile:%(uid)s')(profile_data, _i_profile_data)
def talk_data(tid, preload=None):
if preload is None:
preload = {}
talk = cdata.talk_data(tid)
try:
p3t = preload['talk']
except KeyError:
p3t = models.P3Talk.objects\
.get(talk=tid)
talk['sub_community'] = (p3t.sub_community, p3t.get_sub_community_display())
return talk
def _i_talk_data(sender, **kw):
# invalidation signal is handled by cachef
return 'talk:%s' % (kw['instance'].talk_id,)
talk_data = cache_me(
signals=(cdata.talk_data.invalidated,),
models=(models.P3Talk,),
key='talk:%(tid)s')(talk_data, _i_talk_data)
def profiles_data(uids):
cached = zip(uids, profile_data.get_from_cache([ (x,) for x in uids ]))
missing = [ x[0] for x in cached if x[1] is cache_me.CACHE_MISS ]
preload = {}
profiles = models.P3Profile.objects\
.filter(profile__in=missing)\
.select_related('profile__user')
tags = cmodels.ConferenceTaggedItem.objects\
.filter(
content_type=ContentType.objects.get_for_model(models.P3Profile),
object_id__in=missing
)\
.values('object_id', 'tag__name')
speakers = models.SpeakerConference.objects\
.filter(speaker__in=missing)
for p in profiles:
preload[p.profile_id] = {
'profile': p,
'interests': set(),
}
for row in tags:
preload[row['object_id']]['interests'].add(row['tag__name'])
for spk in speakers:
preload[spk.speaker_id]['speaker'] = spk
cdata.profiles_data(missing)
output = []
for ix, e in enumerate(cached):
pid, val = e
if val is cache_me.CACHE_MISS:
val = profile_data(pid, preload=preload[pid])
output.append(val)
return output
def _user_ticket(user, conference):
q1 = user.ticket_set.all()\
.conference(conference)
q2 = cmodels.Ticket.objects\
.filter(p3_conference__assigned_to=user.email)\
.filter(fare__conference=conference)
qs = (q1 | q2)\
.select_related('orderitem__order', 'fare')
return qs
def _ticket_complete(t):
    # Tickets paid by bank transfer or by an admin are considered complete.
    # Since the PayPal IPN notification is almost simultaneous with the user
    # coming back to our site, filtering out unconfirmed orders also excludes
    # old records left in the db by unconfirmed PayPal payments or by users
    # returning to our site with the back button.
try:
order = t.orderitem.order
except amodels.OrderItem.DoesNotExist:
return False
return (order.method in ('bank', 'admin')) or order.complete()
def all_user_tickets(uid, conference):
"""
Cache-friendly version of user_tickets: returns a list of
(ticket_id, fare_type, fare_code, complete)
    for each ticket associated with the user.
"""
qs = _user_ticket(User.objects.get(id=uid), conference)
output = []
for t in qs:
output.append((
t.id, t.fare.ticket_type, t.fare.code,
_ticket_complete(t)
))
return output
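# Illustrative sketch (hypothetical helper): the cached payload is a list of
# plain tuples, so callers can filter it without touching the ORM again.
def _sketch_complete_ticket_ids(uid, conference):
    return [tid
            for tid, _fare_type, _fare_code, complete
            in all_user_tickets(uid, conference)
            if complete]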
def _i_all_user_tickets(sender, **kw):
o = kw['instance']
if sender is models.TicketConference:
conference = o.ticket.fare.conference
params = [ (o.ticket.user_id, conference) ]
if o.assigned_to:
try:
uid = autils.get_user_account_from_email(o.assigned_to).id
except User.DoesNotExist:
pass
else:
params.append((uid, conference))
elif sender is cmodels.Ticket:
params = [ (o.user_id, o.fare.conference) ]
else:
uid = o.user.user_id
try:
conference = o.orderitem_set\
.all()\
.distinct()\
.values('ticket__fare__conference')[0]
except IndexError:
return []
params = [ (uid, conference) ]
return [ 'all_user_tickets:%s:%s' % (uid, conference) for uid, conference in params ]
all_user_tickets = cache_me(
models=(models.TicketConference, cmodels.Ticket, amodels.Order,),
key='all_user_tickets:%(uid)s:%(conference)s')(all_user_tickets, _i_all_user_tickets)
def user_tickets(user, conference, only_complete=False):
"""
Returns the tickets associated with the user (because s/he bought them
or because they've been assigned to him/her)
"""
qs = _user_ticket(user, conference)
if not only_complete:
return qs
else:
        # Tickets tied to PayPal orders that are not yet "complete" are not
        # shown; since the IPN notification is almost simultaneous with the
        # return to our site, filtering out unconfirmed orders also drops old
        # records left in the db when the user never confirmed the PayPal
        # payment or came back to our site using the back button.
tickets = list(qs)
for ix, t in list(enumerate(tickets))[::-1]:
if not _ticket_complete(t):
del tickets[ix]
return tickets
def conference_users(conference, speakers=True):
"""
    Returns the list of all user ids participating in the conference.
"""
ticket_qs = cmodels.Ticket.objects\
.filter(fare__conference=conference)\
        .filter(fare__code__startswith='T')
# Unassigned tickets
q1 = User.objects\
.filter(id__in=\
ticket_qs\
.filter(p3_conference__assigned_to__in=(None, ''))\
.values_list('user', flat=True)
)\
.values_list('id', flat=True)
# Assigned tickets
q2 = User.objects\
.filter(email__in=\
ticket_qs\
.exclude(p3_conference__assigned_to__in=(None,''))\
.values('p3_conference__assigned_to')
)\
.values_list('id', flat=True)
if speakers:
q3 = User.objects\
.filter(id__in=\
cmodels.TalkSpeaker.objects\
.filter(talk__conference=conference, talk__status='accepted')\
.values('speaker')
)\
.values_list('id', flat=True)
else:
q3 = User.objects.none()
return q1 | q2 | q3
def tags():
"""
Same as `conference.dataaccess.tags` but removing data about
    tags associated with a non-public profile.
"""
from conference.dataaccess import tags as ctags
cid = ContentType.objects.get(app_label='p3', model='p3profile').id
hprofiles = set(models.P3Profile.objects\
.exclude(profile__visibility__in=('m', 'p'))\
.values_list('profile_id', flat=True))
hset = set([(cid, pid) for pid in hprofiles])
data = ctags()
for tag, objects in data.items():
data[tag] = objects - hset
return data
tags = cache_me(
signals=(cdata.tags.invalidated,),
models=(models.P3Profile, cmodels.AttendeeProfile))(tags)