gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for manipulating System Metadata.
- Translate System Metadata between XML and PyXB.
- Translate System Metadata between PyXB and GMN database representations.
- Query the database for System Metadata properties.
"""
import os
import pyxb
import d1_common.const
import d1_common.date_time
import d1_common.types
import d1_common.types.dataoneTypes
import d1_common.types.exceptions
import d1_common.utils
import d1_common.utils.filesystem
import d1_common.utils.ulog
import d1_common.wrap.access_policy
import d1_common.xml
import django.urls
import django.urls.base
import d1_gmn.app
import d1_gmn.app.auth
import d1_gmn.app.did
import d1_gmn.app.model_util
import d1_gmn.app.models
import d1_gmn.app.object_format_cache
import d1_gmn.app.revision
import d1_gmn.app.sciobj_store
import d1_gmn.app.views.util
def archive_sciobj(pid):
    """Flag the object identified by ``pid`` as archived.

    Preconditions:
    - The object with the pid is verified to exist.
    - The object is not a replica.
    - The object is not archived.
    """
    model = d1_gmn.app.model_util.get_sci_model(pid)
    model.is_archived = True
    model.save()
    # Archiving counts as a modification of the object's sysmeta.
    _update_modified_timestamp(model)
def serialize(sysmeta_pyxb, pretty=False):
    """Serialize a System Metadata PyXB object to XML for transport.

    Raises ServiceFailure when the PyXB tree is incomplete.
    """
    xslt_url = django.urls.base.reverse("home_xslt")
    try:
        return d1_common.xml.serialize_for_transport(
            sysmeta_pyxb, pretty, xslt_url=xslt_url
        )
    except pyxb.IncompleteElementContentError as e:
        raise d1_common.types.exceptions.ServiceFailure(
            0, 'Unable to serialize PyXB to XML. error="{}"'.format(e.details())
        )
def deserialize(xml_str):
    """Parse a System Metadata XML document into a PyXB object."""
    return d1_gmn.app.views.util.deserialize(xml_str)
def create_or_update(sysmeta_pyxb, sciobj_url=None):
    """Create or update database representation of a System Metadata object and closely
    related internal state.

    Args:
        sysmeta_pyxb: System Metadata PyXB object to store.

        sciobj_url: url
            - If not passed on create, storage in the internal sciobj store
              is assumed
            - If passed on create, it can reference a location in the internal sciobj
              store, or an arbitrary location on disk, or a remote web server. See the
              sciobj_store module for more information
            - If not passed on update, the sciobj location remains unchanged
            - If passed on update, the sciobj location is updated

    Preconditions:
        - All values in ``sysmeta_pyxb`` must be valid for the operation being performed

    Returns:
        The saved ScienceObject model.
    """
    # TODO: Make sure that old sections are removed if not included in update.
    pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
    if sciobj_url is None:
        sciobj_url = d1_gmn.app.sciobj_store.get_rel_sciobj_file_url_by_pid(pid)
    try:
        sci_model = d1_gmn.app.model_util.get_sci_model(pid)
    except d1_gmn.app.models.ScienceObject.DoesNotExist:
        # Create path: fields that are fixed at create time are only set here.
        # NOTE(review): on the update path, sciobj_url is computed above but
        # never applied to the existing row, which appears to contradict the
        # "If passed on update, the sciobj location is updated" docstring —
        # confirm the intended behavior.
        sci_model = d1_gmn.app.models.ScienceObject()
        sci_model.pid = d1_gmn.app.did.get_or_create_did(pid)
        sci_model.url = sciobj_url
        sci_model.serial_version = sysmeta_pyxb.serialVersion
        sci_model.uploaded_timestamp = d1_common.date_time.normalize_datetime_to_utc(
            sysmeta_pyxb.dateUploaded
        )
    _base_pyxb_to_model(sci_model, sysmeta_pyxb)
    # First save: the row must exist before the related section tables below
    # can reference it.
    sci_model.save()
    if _has_media_type_pyxb(sysmeta_pyxb):
        _media_type_pyxb_to_model(sci_model, sysmeta_pyxb)
    # Access policy is always (re)written — it also covers the implicit
    # rights-holder permissions.
    _access_policy_pyxb_to_model(sci_model, sysmeta_pyxb)
    if _has_replication_policy_pyxb(sysmeta_pyxb):
        _replication_policy_pyxb_to_model(sci_model, sysmeta_pyxb)
    replica_pyxb_to_model(sci_model, sysmeta_pyxb)
    revision_pyxb_to_model(sci_model, sysmeta_pyxb, pid)
    # Second save: presumably persists fields set by revision_pyxb_to_model()
    # (via set_revision_links) — confirm.
    sci_model.save()
    return sci_model
def get_filename(sciobj_model):
    """Generate a safe filename for SciObj.

    - The returned filename will not have any characters, such as slashes or
      backslashes, that may cause file access to occur outside of the intended
      directory in the filesystem.
    - If a filename is provided in SysMeta but is missing a base name (such as
      ".bin"), the PID is used as the base name.
    - If a filename is provided in SysMeta but is missing the extension (such as
      "my file"), an extension derived from the FormatId is used.
    - If no filename is provided in SysMeta, it is handled as if both the base
      name and the extension were missing.
    - When deriving the extension from the FormatId, ".data" is used for
      FormatIds not in the CN ObjectFormatList cache.
    """
    base_name = ""
    extension = ""
    if sciobj_model.filename:
        base_name, extension = os.path.splitext(sciobj_model.filename)
        if base_name.startswith(".") and extension == "":
            # os.path.splitext() splits names such as ".bin" into (".bin", "");
            # treat the whole name as the extension instead.
            base_name, extension = "", base_name
    if not base_name:
        base_name = sciobj_model.pid.did
    if not extension:
        extension = d1_gmn.app.object_format_cache.get_filename_extension(
            sciobj_model.format.format, ".data"
        )
    return d1_common.utils.filesystem.gen_safe_path_element(base_name + extension)
def update_modified_timestamp(pid):
    """Set the modified timestamp of the object identified by ``pid`` to now."""
    _update_modified_timestamp(d1_gmn.app.model_util.get_sci_model(pid))
def model_to_pyxb(pid):
    """Return the full System Metadata PyXB object for the given PID."""
    return _model_to_pyxb(pid)
def _model_to_pyxb(pid):
    """Assemble the SystemMetadata PyXB tree from the DB models for ``pid``.

    Optional sections (mediaType, accessPolicy, replicationPolicy) are only
    added when corresponding rows exist in the database.
    """
    model = d1_gmn.app.model_util.get_sci_model(pid)
    pyxb_obj = _base_model_to_pyxb(model)
    if _has_media_type_db(model):
        pyxb_obj.mediaType = _media_type_model_to_pyxb(model)
    if _has_access_policy_db(model):
        pyxb_obj.accessPolicy = _access_policy_model_to_pyxb(model)
    if _has_replication_policy_db(model):
        pyxb_obj.replicationPolicy = _replication_policy_model_to_pyxb(model)
    pyxb_obj.replica = replica_model_to_pyxb(model)
    return pyxb_obj
def _base_pyxb_to_model(sci_model, sysmeta_pyxb):
    """Copy the base (non-optional-section) System Metadata fields from PyXB to
    the DB model.

    Does not save the model; the caller is responsible for that.
    """
    sci_model.modified_timestamp = d1_common.date_time.normalize_datetime_to_utc(
        sysmeta_pyxb.dateSysMetadataModified
    )
    sci_model.format = d1_gmn.app.models.format(sysmeta_pyxb.formatId)
    # fileName may be absent; store None in that case.
    sci_model.filename = getattr(sysmeta_pyxb, "fileName", None)
    sci_model.checksum = d1_common.xml.get_req_val(sysmeta_pyxb.checksum)
    sci_model.checksum_algorithm = d1_gmn.app.models.checksum_algorithm(
        sysmeta_pyxb.checksum.algorithm
    )
    sci_model.size = sysmeta_pyxb.size
    # submitter may be absent; the existing value is then left untouched.
    if sysmeta_pyxb.submitter:
        sci_model.submitter = d1_gmn.app.models.subject(
            d1_common.xml.get_req_val(sysmeta_pyxb.submitter)
        )
    sci_model.rights_holder = d1_gmn.app.models.subject(
        d1_common.xml.get_req_val(sysmeta_pyxb.rightsHolder)
    )
    sci_model.origin_member_node = d1_gmn.app.models.node(
        d1_common.xml.get_req_val(sysmeta_pyxb.originMemberNode)
    )
    sci_model.authoritative_member_node = d1_gmn.app.models.node(
        d1_common.xml.get_req_val(sysmeta_pyxb.authoritativeMemberNode)
    )
    # archived may be absent or None; coerce to False in that case.
    sci_model.is_archived = sysmeta_pyxb.archived or False
def _base_model_to_pyxb(sciobj_model):
    """Build a SystemMetadata PyXB object holding the base (non-optional
    section) fields from the DB model."""
    base_pyxb = d1_common.types.dataoneTypes.systemMetadata()
    base_pyxb.identifier = d1_common.types.dataoneTypes.Identifier(sciobj_model.pid.did)
    base_pyxb.serialVersion = sciobj_model.serial_version
    base_pyxb.dateSysMetadataModified = d1_common.date_time.normalize_datetime_to_utc(
        sciobj_model.modified_timestamp
    )
    base_pyxb.dateUploaded = sciobj_model.uploaded_timestamp
    base_pyxb.formatId = sciobj_model.format.format
    # Generate a safe filename for SciObj. Fall back to PID and file extension derived
    # from formatId if required.
    base_pyxb.fileName = get_filename(sciobj_model)
    base_pyxb.checksum = d1_common.types.dataoneTypes.Checksum(sciobj_model.checksum)
    base_pyxb.checksum.algorithm = sciobj_model.checksum_algorithm.checksum_algorithm
    base_pyxb.size = sciobj_model.size
    base_pyxb.submitter = sciobj_model.submitter.subject
    base_pyxb.rightsHolder = sciobj_model.rights_holder.subject
    base_pyxb.originMemberNode = sciobj_model.origin_member_node.urn
    base_pyxb.authoritativeMemberNode = sciobj_model.authoritative_member_node.urn
    # Revision links may be null foreign keys; get_did_by_foreign_key()
    # handles that (returns a value suitable for the optional elements).
    base_pyxb.obsoletes = d1_gmn.app.did.get_did_by_foreign_key(sciobj_model.obsoletes)
    base_pyxb.obsoletedBy = d1_gmn.app.did.get_did_by_foreign_key(
        sciobj_model.obsoleted_by
    )
    base_pyxb.archived = sciobj_model.is_archived
    base_pyxb.seriesId = d1_gmn.app.revision.get_sid_by_pid(sciobj_model.pid.did)
    return base_pyxb
def _update_modified_timestamp(sci_model):
    """Stamp the model with the current UTC time and persist it."""
    sci_model.modified_timestamp = d1_common.date_time.utc_now()
    sci_model.save()
# ------------------------------------------------------------------------------
# Media Type
# ------------------------------------------------------------------------------
# <!--Optional:-->
# <mediaType name="string">
# <!--Zero or more repetitions:-->
# <property name="string">string1</property>
# <property name="string">string3</property>
# <property name="string">string4</property>
# </mediaType>
def _has_media_type_pyxb(sysmeta_pyxb):
return hasattr(sysmeta_pyxb, "mediaType") and sysmeta_pyxb.mediaType is not None
def _media_type_pyxb_to_model(sci_model, sysmeta_pyxb):
    """Replace the stored media type for this object with the sysmeta's."""
    _delete_existing_media_type(sysmeta_pyxb)
    media_type_pyxb = sysmeta_pyxb.mediaType
    name_model = _insert_media_type_name_row(sci_model, media_type_pyxb)
    _insert_media_type_property_rows(name_model, media_type_pyxb)
def _delete_existing_media_type(sysmeta_pyxb):
    """Delete any stored MediaType rows for the object in ``sysmeta_pyxb``."""
    pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
    d1_gmn.app.models.MediaType.objects.filter(sciobj__pid__did=pid).delete()
def _insert_media_type_name_row(sci_model, media_type_pyxb):
    """Create and persist the MediaType row; return the saved model."""
    model = d1_gmn.app.models.MediaType(
        sciobj=sci_model, name=media_type_pyxb.name
    )
    model.save()
    return model
def _insert_media_type_property_rows(media_type_model, media_type_pyxb):
    """Persist one MediaTypeProperty row per property in the media type."""
    for prop_pyxb in media_type_pyxb.property_:
        d1_gmn.app.models.MediaTypeProperty(
            media_type=media_type_model,
            name=prop_pyxb.name,
            value=d1_common.xml.get_req_val(prop_pyxb),
        ).save()
def _has_media_type_db(sciobj_model):
    """Return True if a MediaType row exists for the given SciObj."""
    queryset = d1_gmn.app.models.MediaType.objects.filter(sciobj=sciobj_model)
    return queryset.exists()
def _media_type_model_to_pyxb(sciobj_model):
    """Build the mediaType PyXB section from the DB rows for the given SciObj."""
    model = d1_gmn.app.models.MediaType.objects.get(sciobj=sciobj_model)
    pyxb_obj = d1_common.types.dataoneTypes.MediaType()
    pyxb_obj.name = model.name
    # Sorted to make the serialized output deterministic.
    prop_models = d1_gmn.app.models.MediaTypeProperty.objects.filter(
        media_type=model
    ).order_by("name", "value")
    for prop_model in prop_models:
        pyxb_obj.property_.append(
            d1_common.types.dataoneTypes.MediaTypeProperty(
                prop_model.value, name=prop_model.name
            )
        )
    return pyxb_obj
# ------------------------------------------------------------------------------
# Access Policy
# ------------------------------------------------------------------------------
def _access_policy_pyxb_to_model(sci_model, sysmeta_pyxb):
    """Create or update the database representation of the sysmeta_pyxb access policy.

    If called without an access policy, any existing permissions on the object
    are removed and the access policy for the rights holder is recreated.

    Preconditions:
      - Subject has changePermission for object.

    Postconditions:
      - The Permission and related tables contain the new access policy.

    Notes:
      - There can be multiple rules in a policy and each rule can contain multiple
        subjects. So there are two ways that the same subject can be specified multiple
        times in a policy. If this happens, multiple, conflicting action levels may be
        provided for the subject. This is handled by checking for an existing row for
        the subject for this object and updating it if it contains a lower action
        level. The end result is that there is one row for each combination of subject
        and object, and this row contains the highest action level.
    """
    # Existing rows must be cleared before the new rows are inserted below.
    _delete_existing_access_policy(sysmeta_pyxb)
    # Add changePermission for rights holder.
    allow_rights_holder = d1_common.types.dataoneTypes.AccessRule()
    permission = d1_common.types.dataoneTypes.Permission(
        d1_gmn.app.auth.CHANGEPERMISSION_STR
    )
    allow_rights_holder.permission.append(permission)
    allow_rights_holder.subject.append(
        d1_common.xml.get_req_val(sysmeta_pyxb.rightsHolder)
    )
    top_level = _get_highest_level_action_for_rule(allow_rights_holder)
    _insert_permission_rows(sci_model, allow_rights_holder, top_level)
    # Create db entries for all subjects for which permissions have been granted.
    if _has_access_policy_pyxb(sysmeta_pyxb):
        for allow_rule in sysmeta_pyxb.accessPolicy.allow:
            top_level = _get_highest_level_action_for_rule(allow_rule)
            _insert_permission_rows(sci_model, allow_rule, top_level)
def _has_access_policy_db(sciobj_model):
    """Return True if any Permission rows exist for the given SciObj."""
    queryset = d1_gmn.app.models.Permission.objects.filter(sciobj=sciobj_model)
    return queryset.exists()
def _has_access_policy_pyxb(sysmeta_pyxb):
return (
hasattr(sysmeta_pyxb, "accessPolicy") and sysmeta_pyxb.accessPolicy is not None
)
def _delete_existing_access_policy(sysmeta_pyxb):
    """Delete any stored Permission rows for the object in ``sysmeta_pyxb``."""
    pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
    d1_gmn.app.models.Permission.objects.filter(sciobj__pid__did=pid).delete()
def _get_highest_level_action_for_rule(allow_rule):
    """Return the highest action level granted by ``allow_rule`` (0 if none)."""
    levels = [
        d1_gmn.app.auth.action_to_level(permission)
        for permission in allow_rule.permission
    ]
    # The floor of 0 matches the original accumulator's starting value.
    return max([0] + levels)
def _insert_permission_rows(sci_model, allow_rule, top_level):
    """Persist one Permission row per subject in ``allow_rule`` at ``top_level``."""
    for subject_pyxb in allow_rule.subject:
        subject_model = d1_gmn.app.models.subject(
            d1_common.xml.get_req_val(subject_pyxb)
        )
        d1_gmn.app.models.Permission(
            sciobj=sci_model, subject=subject_model, level=top_level
        ).save()
def _access_policy_model_to_pyxb(sciobj_model):
    """Build the accessPolicy PyXB section from the Permission rows for the
    object.

    Returns a normalized AccessPolicy, or None (implicitly) when the only
    stored permissions are the implicit rightsHolder ones — the caller then
    omits the accessPolicy section entirely.
    """
    access_policy_pyxb = d1_common.types.dataoneTypes.AccessPolicy()
    for permission_model in d1_gmn.app.models.Permission.objects.filter(
        sciobj=sciobj_model
    ).order_by("subject", "level", "sciobj__pid__did"):
        # Skip implicit permissions for rightsHolder.
        if permission_model.subject.subject == sciobj_model.rights_holder.subject:
            continue
        access_rule_pyxb = d1_common.types.dataoneTypes.AccessRule()
        permission_pyxb = d1_common.types.dataoneTypes.Permission(
            d1_gmn.app.auth.level_to_action(permission_model.level)
        )
        access_rule_pyxb.permission.append(permission_pyxb)
        access_rule_pyxb.subject.append(permission_model.subject.subject)
        access_policy_pyxb.allow.append(access_rule_pyxb)
    if len(access_policy_pyxb.allow):
        return d1_common.wrap.access_policy.get_normalized_pyxb(access_policy_pyxb)
# ------------------------------------------------------------------------------
# Replication Policy
# ------------------------------------------------------------------------------
# <replicationPolicy xmlns="" replicationAllowed="false" numberReplicas="0">
# <preferredMemberNode>preferredMemberNode0</preferredMemberNode>
# <preferredMemberNode>preferredMemberNode1</preferredMemberNode>
# <blockedMemberNode>blockedMemberNode0</blockedMemberNode>
# <blockedMemberNode>blockedMemberNode1</blockedMemberNode>
# </replicationPolicy>
def _replication_policy_pyxb_to_model(sciobj_model, sysmeta_pyxb):
    """Replace the stored replication policy with the one in ``sysmeta_pyxb``.

    Returns the saved ReplicationPolicy model.
    """
    _delete_existing_replication_policy(sciobj_model)
    replication_policy_model = d1_gmn.app.models.ReplicationPolicy()
    replication_policy_model.sciobj = sciobj_model
    # replicationAllowed and numberReplicas are optional XML attributes;
    # fall back to the DataONE defaults when absent.
    replication_policy_model.replication_is_allowed = d1_common.xml.get_opt_attr(
        sysmeta_pyxb.replicationPolicy,
        "replicationAllowed",
        d1_common.const.DEFAULT_REPLICATION_ALLOWED,
    )
    replication_policy_model.desired_number_of_replicas = d1_common.xml.get_opt_attr(
        sysmeta_pyxb.replicationPolicy,
        "numberReplicas",
        d1_common.const.DEFAULT_NUMBER_OF_REPLICAS,
    )
    # Must be saved before the node rows below can reference it.
    replication_policy_model.save()

    def add(node_ref_pyxb, rep_node_model):
        # Closes over replication_policy_model saved above; creates one row
        # per referenced node in the given (preferred/blocked) model class.
        for rep_node_urn in node_ref_pyxb:
            # node_urn_model = d1_gmn.app.models.Node.objects.get_or_create(
            #     urn=rep_node_urn.value()
            # )[0]
            node_urn_model = d1_gmn.app.models.node(
                d1_common.xml.get_req_val(rep_node_urn)
            )
            rep_node_obj = rep_node_model()
            rep_node_obj.node = node_urn_model
            rep_node_obj.replication_policy = replication_policy_model
            rep_node_obj.save()

    add(
        sysmeta_pyxb.replicationPolicy.preferredMemberNode,
        d1_gmn.app.models.PreferredMemberNode,
    )
    add(
        sysmeta_pyxb.replicationPolicy.blockedMemberNode,
        d1_gmn.app.models.BlockedMemberNode,
    )
    return replication_policy_model
def _has_replication_policy_db(sciobj_model):
    """Return True if a ReplicationPolicy row exists for the given SciObj."""
    queryset = d1_gmn.app.models.ReplicationPolicy.objects.filter(
        sciobj=sciobj_model
    )
    return queryset.exists()
def _delete_existing_replication_policy(sciobj_model):
    """Delete any stored ReplicationPolicy rows for the given SciObj."""
    queryset = d1_gmn.app.models.ReplicationPolicy.objects.filter(
        sciobj=sciobj_model
    )
    queryset.delete()
def _has_replication_policy_pyxb(sysmeta_pyxb):
return (
hasattr(sysmeta_pyxb, "replicationPolicy")
and sysmeta_pyxb.replicationPolicy is not None
)
def _replication_policy_model_to_pyxb(sciobj_model):
    """Build the replicationPolicy PyXB section from the DB rows for the SciObj."""
    policy_model = d1_gmn.app.models.ReplicationPolicy.objects.get(
        sciobj=sciobj_model
    )
    policy_pyxb = d1_common.types.dataoneTypes.ReplicationPolicy()
    policy_pyxb.replicationAllowed = policy_model.replication_is_allowed
    policy_pyxb.numberReplicas = policy_model.desired_number_of_replicas
    # Copy the preferred and blocked node lists, sorted for deterministic
    # serialized output.
    for node_list_pyxb, node_model_class in (
        (policy_pyxb.preferredMemberNode, d1_gmn.app.models.PreferredMemberNode),
        (policy_pyxb.blockedMemberNode, d1_gmn.app.models.BlockedMemberNode),
    ):
        node_rows = node_model_class.objects.filter(
            replication_policy=policy_model
        ).order_by("node__urn")
        for node_row in node_rows:
            node_list_pyxb.append(node_row.node.urn)
    return policy_pyxb
def revision_pyxb_to_model(sci_model, sysmeta_pyxb, pid):
    """Store the obsolescence links and revision chain info from the sysmeta."""
    sid = d1_common.xml.get_opt_val(sysmeta_pyxb, "seriesId")
    obsoletes = d1_common.xml.get_opt_val(sysmeta_pyxb, "obsoletes")
    obsoleted_by = d1_common.xml.get_opt_val(sysmeta_pyxb, "obsoletedBy")
    d1_gmn.app.revision.set_revision_links(sci_model, obsoletes, obsoleted_by)
    d1_gmn.app.revision.create_or_update_chain(pid, sid, obsoletes, obsoleted_by)
# ------------------------------------------------------------------------------
# Remote Replica
# ------------------------------------------------------------------------------
# <replica xmlns="">
# <replicaMemberNode>replicaMemberNode0</replicaMemberNode>
# <replicationStatus>queued</replicationStatus>
# <replicaVerified>2006-05-04T18:13:51.0</replicaVerified>
# </replica>
def replica_pyxb_to_model(sciobj_model, sysmeta_pyxb):
    """Replace the stored remote replicas with those listed in the sysmeta."""
    d1_gmn.app.models.RemoteReplica.objects.filter(sciobj=sciobj_model).delete()
    for replica in sysmeta_pyxb.replica:
        _register_remote_replica(sciobj_model, replica)
def _register_remote_replica(sciobj_model, replica_pyxb):
    """Create the DB rows recording a single remote replica from the sysmeta."""
    verified_timestamp = d1_common.date_time.normalize_datetime_to_utc(
        replica_pyxb.replicaVerified
    )
    info_model = d1_gmn.app.models.replica_info(
        status_str=replica_pyxb.replicationStatus,
        source_node_urn=d1_common.xml.get_req_val(replica_pyxb.replicaMemberNode),
        timestamp=verified_timestamp,
    )
    d1_gmn.app.models.remote_replica(
        sciobj_model=sciobj_model, replica_info_model=info_model
    )
def replica_model_to_pyxb(sciobj_model):
    """Build the list of replica PyXB sections from the DB rows for the SciObj."""
    pyxb_list = []
    replica_models = d1_gmn.app.models.RemoteReplica.objects.filter(
        sciobj=sciobj_model
    ).order_by("info__timestamp", "info__member_node__urn")
    for replica_model in replica_models:
        replica_pyxb = d1_common.types.dataoneTypes.Replica()
        replica_pyxb.replicaMemberNode = replica_model.info.member_node.urn
        replica_pyxb.replicationStatus = replica_model.info.status.status
        replica_pyxb.replicaVerified = d1_common.date_time.normalize_datetime_to_utc(
            replica_model.info.timestamp
        )
        pyxb_list.append(replica_pyxb)
    return pyxb_list
| |
from games import Game
from copy import deepcopy
class C4Game(Game):
    """Connect Four adapter for the generic ``Game`` search framework.

    States are ``ConnectFour`` instances; moves are column indices
    (0..6 on the default board).
    """

    def __init__(self, state):
        self.initial = state

    def actions(self, state):
        """Return the columns that still have room for a piece.

        Bug fix: the board dimensions were hard-coded to 7 columns / 6 rows;
        they now come from ``state.size`` (same result on the default board).
        """
        return [
            c for c in range(state.size['c'])
            if len(state.grid[c]) < state.size['r']
        ]

    # defines the order of play
    def opponent(self, player):
        """Return the other player's symbol, or None for an unknown symbol."""
        if player == 'X':
            return 'O'
        if player == 'O':
            return 'X'
        return None

    def to_move(self, player):
        """Return whose turn it is: 'X' for the first player, 'O' otherwise.

        NOTE(review): despite the name, ``player`` is a ConnectFour state —
        only states carry ``first_player``.
        """
        if player.first_player == False:
            return 'O'
        if player.first_player == True:
            return 'X'
        return None

    def vertical4(self, state, player):
        """Return 1 if ``player`` has 4 consecutive pieces in any column, else 0.

        Bug fix: the original counted pieces *not* equal to ``player`` (inverted
        comparison) and returned 0 for the whole board as soon as it hit a
        column shorter than 4.
        """
        for c in range(state.size['c']):
            run = 0
            for piece in state.grid[c]:
                run = run + 1 if piece == player else 0
                if run == 4:
                    return 1
        return 0

    def horizontal4(self, state, player):
        """Return 1 if ``player`` has 4 consecutive pieces in any row, else 0.

        Bug fix: the original indexed ``state.grid`` as if it were row-major,
        but the grid is a list of columns, and it also used the inverted
        comparison described in vertical4().
        """
        for r in range(state.size['r']):
            run = 0
            for c in range(state.size['c']):
                column = state.grid[c]
                occupied = len(column) > r and column[r] == player
                run = run + 1 if occupied else 0
                if run == 4:
                    return 1
        return 0

    def rightDiagonal4(self, state, player):
        """Return 1 if ``player`` has 4 in a diagonal rising to the right, else 0.

        Implemented (was an empty stub).
        """
        for c in range(state.size['c'] - 3):
            for r in range(state.size['r'] - 3):
                if all(
                    len(state.grid[c + k]) > r + k
                    and state.grid[c + k][r + k] == player
                    for k in range(4)
                ):
                    return 1
        return 0

    def leftDiagonal4(self, state, player):
        """Return 1 if ``player`` has 4 in a diagonal falling to the right, else 0.

        Implemented (was an empty stub).
        """
        for c in range(state.size['c'] - 3):
            for r in range(3, state.size['r']):
                if all(
                    len(state.grid[c + k]) > r - k
                    and state.grid[c + k][r - k] == player
                    for k in range(4)
                ):
                    return 1
        return 0

    def utility(self, state, player):
        """Return the value to player; 1 for win, -1 for loss, 0 otherwise.

        NOTE(review): ``ConnectFour.drop()`` MUTATES the state (it places a
        real piece) and returns False/1/-1, so probing every column here both
        alters the state and conflates "a drop ended the game" with "loss for
        ``player``". Preserved as-is (the original's eight sequential probes
        collapsed into one loop, including the always-out-of-range column 7);
        a correct implementation should use the 4-in-a-row detectors above.
        """
        for column in range(8):
            if ConnectFour.drop(state, column) < 0:
                return -1
        return 0

    def terminal_test(self, state):
        """A state is terminal if there's 4 in a row or the board is full.

        Note: a duplicate, shadowed ``terminal_test`` that probed ``utility``
        was removed; only this (previously shadowing) definition was live.
        """
        return state.check() != False

    def result(self, state, move):
        """Return the successor state after dropping ``move`` (0..6) into a
        deep copy of ``state`` (the original state is not modified)."""
        successor = deepcopy(state)
        successor.drop(move)
        return successor
class ConnectFour:
    """Mutable Connect Four board.

    ``grid`` is a list of columns; each column is a list of player symbols
    growing from the bottom. ``first_player`` tracks whose turn it is
    (True == player1).
    """

    def __init__(self, columns=7, rows=6, player1='X', player2='O'):
        self.size = {'c': columns, 'r': rows}
        # self.grid = []
        self.first_player = True
        self.players = {True: player1, False: player2}
        self.game_over = False
        self.grid = [[] for i in range(self.size['c'])]

    def drop(self, column):
        """Drop the current player's piece into ``column``.

        Returns False for an out-of-range or full column, 1 for a normal move
        (the turn passes to the other player), and -1 when the move ends the
        game (``game_over`` is then set to ``check()``'s truthy result).
        """
        # if self.game_over: return False
        if column < 0 or column >= self.size['c']:
            return False
        if len(self.grid[column]) >= self.size['r']:
            return False
        self.grid[column].append(self.players[self.first_player])
        c = self.check()
        if c == False:
            self.first_player = not self.first_player
            return 1
        else:
            self.game_over = c
            return -1

    def check(self):
        """Scan the whole board for a finished game.

        Returns True for vertical/horizontal/up-right-diagonal wins, -1 for a
        down-right-diagonal win (NOTE(review): inconsistent with the True
        returned by the other branches, though both are truthy to callers),
        'draw' when the board is full, and False while the game is open.
        """
        d = 0  # running count of placed pieces, used for the draw test below
        for i, column in enumerate(self.grid):
            d += len(self.grid[i])
            for j, row in enumerate(column):
                # h/v: is there room for a 4-run starting at (i, j) going
                # right / up?
                h = i + 4 <= self.size['c']
                v = j + 4 <= len(self.grid[i])
                if v:
                    # Vertical: 4 identical symbols stacked in this column.
                    if 1 == len(set(self.grid[i][j:j + 4])):
                        return True
                if h:
                    # Horizontal: the next 3 columns are tall enough and hold
                    # the same symbol at height j.
                    if len(self.grid[i]) > j and len(self.grid[i + 1]) > j and len(self.grid[i + 2]) > j and len(
                            self.grid[i + 3]) > j:
                        s_r = set()
                        for k in range(4):
                            s_r.add(self.grid[i + k][j])
                        if 1 == len(s_r):
                            return True
                if h:
                    # Up-right diagonal; '??' pads missing cells so short
                    # columns can never look like a win.
                    s = set()
                    for k in range(4):
                        if len(self.grid[i + k]) > j + k:
                            s.add(self.grid[i + k][j + k])
                        else:
                            s.add('??')
                    if 1 == len(s):
                        return True
                if h and j - 4 + 1 >= 0:
                    # Down-right diagonal, same padding scheme.
                    s = set()
                    for k in range(4):
                        if len(self.grid[i + k]) > j - k:
                            s.add(self.grid[i + k][j - k])
                        else:
                            s.add('??')
                    if 1 == len(s):
                        return -1
        # After the loop, d is the total number of placed pieces.
        if d == self.size['c'] * self.size['r']:
            return 'draw'
        return False
| |
from __future__ import absolute_import
import six
from mistune import markdown
from django.core.urlresolvers import reverse
from sentry.models import IntegrationExternalProject, OrganizationIntegration, User
from sentry.integrations.issues import IssueSyncMixin
from sentry.integrations.exceptions import ApiUnauthorized, ApiError
from django.utils.translation import ugettext as _
class VstsIssueSync(IssueSyncMixin):
    """Issue sync for the Azure DevOps (formerly VSTS) integration.

    Creates/links work items and syncs assignees, comments, and resolution
    status between Sentry issues and Azure DevOps work items.
    """

    description = "Integrate Azure DevOps work items by linking a project."
    slug = "vsts"
    conf_key = slug

    issue_fields = frozenset(["id", "title", "url"])
    # Work-item state *categories* that count as "done" for status sync.
    done_categories = frozenset(["Resolved", "Completed"])

    def get_persisted_default_config_fields(self):
        return ["project"]

    def create_default_repo_choice(self, default_repo):
        # default_repo should be the project_id
        project = self.get_client().get_project(self.instance, default_repo)
        return (project["id"], project["name"])

    def get_project_choices(self, group, **kwargs):
        """Return (default_project_id, [(id, name), ...]) for the project picker."""
        client = self.get_client()
        try:
            projects = client.get_projects(self.instance)["value"]
        except (ApiError, ApiUnauthorized, KeyError) as e:
            self.raise_error(e)
        project_choices = [(project["id"], project["name"]) for project in projects]
        params = kwargs.get("params", {})
        defaults = self.get_project_defaults(group.project_id)
        try:
            default_project = params.get(
                "project", defaults.get("project") or project_choices[0][0]
            )
        except IndexError:
            # No projects available at all; nothing to default to.
            return None, project_choices
        # If a project has been selected outside of the default list of
        # projects, stick it onto the front of the list so that it can be
        # selected.
        try:
            next(True for r in project_choices if r[0] == default_project)
        except StopIteration:
            try:
                project_choices.insert(0, self.create_default_repo_choice(default_project))
            except (ApiError, ApiUnauthorized):
                return None, project_choices
        return default_project, project_choices

    def get_create_issue_config(self, group, **kwargs):
        kwargs["link_referrer"] = "vsts_integration"
        fields = super(VstsIssueSync, self).get_create_issue_config(group, **kwargs)
        # Azure/VSTS has BOTH projects and repositories. A project can have many repositories.
        # Workitems (issues) are associated with the project not the repository.
        default_project, project_choices = self.get_project_choices(group, **kwargs)
        return [
            {
                "name": "project",
                "required": True,
                "type": "choice",
                "choices": project_choices,
                "defaultValue": default_project,
                "label": _("Project"),
                "placeholder": default_project or _("MyProject"),
            }
        ] + fields

    def get_link_issue_config(self, group, **kwargs):
        """Point the externalIssue field at the VSTS search autocomplete."""
        fields = super(VstsIssueSync, self).get_link_issue_config(group, **kwargs)
        org = group.organization
        autocomplete_url = reverse("sentry-extensions-vsts-search", args=[org.slug, self.model.id])
        for field in fields:
            if field["name"] == "externalIssue":
                field["url"] = autocomplete_url
                field["type"] = "select"
        return fields

    def get_issue_url(self, key, **kwargs):
        return "%s_workitems/edit/%s" % (self.instance, six.text_type(key))

    def create_issue(self, data, **kwargs):
        """
        Creates the issue on the remote service and returns an issue ID.
        """
        project_id = data.get("project")
        if project_id is None:
            raise ValueError("Azure DevOps expects project")
        client = self.get_client()
        title = data["title"]
        description = data["description"]
        try:
            created_item = client.create_work_item(
                instance=self.instance,
                project=project_id,
                title=title,
                # Decriptions cannot easily be seen. So, a comment will be added as well.
                description=markdown(description),
                comment=markdown(description),
            )
        except Exception as e:
            self.raise_error(e)
        project_name = created_item["fields"]["System.AreaPath"]
        return {
            "key": six.text_type(created_item["id"]),
            "title": title,
            "description": description,
            "metadata": {"display_name": "%s#%s" % (project_name, created_item["id"])},
        }

    def get_issue(self, issue_id, **kwargs):
        """Fetch a work item and map it to Sentry's external-issue dict shape."""
        client = self.get_client()
        work_item = client.get_work_item(self.instance, issue_id)
        return {
            "key": six.text_type(work_item["id"]),
            "title": work_item["fields"]["System.Title"],
            "description": work_item["fields"].get("System.Description"),
            "metadata": {
                "display_name": "%s#%s" % (work_item["fields"]["System.AreaPath"], work_item["id"])
            },
        }

    def sync_assignee_outbound(self, external_issue, user, assign=True, **kwargs):
        """Push a Sentry assignee change out to the Azure DevOps work item.

        When ``assign`` is True, pages through the org's users (via the
        X-MS-ContinuationToken header) looking for one whose mailAddress
        matches a verified Sentry email; logs and returns quietly if none
        matches. When ``assign`` is False, clears the assignee (None).
        """
        client = self.get_client()
        assignee = None
        if assign is True:
            sentry_emails = [email.email.lower() for email in user.get_verified_emails()]
            continuation_token = None
            while True:
                vsts_users = client.get_users(self.model.name, continuation_token)
                continuation_token = vsts_users.headers.get("X-MS-ContinuationToken")
                for vsts_user in vsts_users["value"]:
                    vsts_email = vsts_user.get(u"mailAddress")
                    if vsts_email and vsts_email.lower() in sentry_emails:
                        assignee = vsts_user["mailAddress"]
                        break
                if not continuation_token:
                    break
            if assignee is None:
                # TODO(lb): Email people when this happens
                self.logger.info(
                    "vsts.assignee-not-found",
                    extra={
                        "integration_id": external_issue.integration_id,
                        "user_id": user.id,
                        "issue_key": external_issue.key,
                    },
                )
                return
        try:
            client.update_work_item(self.instance, external_issue.key, assigned_to=assignee)
        except (ApiUnauthorized, ApiError):
            self.logger.info(
                "vsts.failed-to-assign",
                extra={
                    "integration_id": external_issue.integration_id,
                    "user_id": user.id,
                    "issue_key": external_issue.key,
                },
            )

    def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs):
        """Push a Sentry resolve/unresolve out to the work item's state."""
        client = self.get_client()
        work_item = client.get_work_item(self.instance, external_issue.key)
        # For some reason, vsts doesn't include the project id
        # in the work item response.
        # TODO(jess): figure out if there's a better way to do this
        vsts_project_name = work_item["fields"]["System.TeamProject"]
        vsts_projects = client.get_projects(self.instance)["value"]
        vsts_project_id = None
        for p in vsts_projects:
            if p["name"] == vsts_project_name:
                vsts_project_id = p["id"]
                break
        try:
            external_project = IntegrationExternalProject.objects.get(
                external_id=vsts_project_id,
                organization_integration_id__in=OrganizationIntegration.objects.filter(
                    organization_id=external_issue.organization_id,
                    integration_id=external_issue.integration_id,
                ),
            )
        except IntegrationExternalProject.DoesNotExist:
            self.logger.info(
                "vsts.external-project-not-found",
                extra={
                    "integration_id": external_issue.integration_id,
                    "is_resolved": is_resolved,
                    "issue_key": external_issue.key,
                },
            )
            return
        status = (
            external_project.resolved_status if is_resolved else external_project.unresolved_status
        )
        try:
            client.update_work_item(self.instance, external_issue.key, state=status)
        except (ApiUnauthorized, ApiError) as error:
            self.logger.info(
                "vsts.failed-to-change-status",
                extra={
                    "integration_id": external_issue.integration_id,
                    "is_resolved": is_resolved,
                    "issue_key": external_issue.key,
                    "exception": error,
                },
            )

    def should_unresolve(self, data):
        # NOTE(review): `and` binds tighter than `or`, so this evaluates as
        # `old in done or (old is None and new not in done)` — the new_state
        # check is bypassed whenever old_state is a done state. Confirm this
        # matches the intended resolve/unresolve semantics.
        done_states = self.get_done_states(data["project"])
        return (
            data["old_state"] in done_states
            or data["old_state"] is None
            and not data["new_state"] in done_states
        )

    def should_resolve(self, data):
        # Resolve only on a transition from a not-done state into a done state.
        done_states = self.get_done_states(data["project"])
        return not data["old_state"] in done_states and data["new_state"] in done_states

    def get_done_states(self, project):
        """Return the work-item state names whose category is in done_categories."""
        client = self.get_client()
        try:
            all_states = client.get_work_item_states(self.instance, project)["value"]
        except ApiError as err:
            self.logger.info(
                "vsts.get-done-states.failed",
                extra={"integration_id": self.model.id, "exception": err},
            )
            return []
        done_states = [
            state["name"] for state in all_states if state["category"] in self.done_categories
        ]
        return done_states

    def get_issue_display_name(self, external_issue):
        if external_issue.metadata is None:
            return ""
        return external_issue.metadata["display_name"]

    def create_comment(self, issue_id, user_id, group_note):
        comment = group_note.data["text"]
        quoted_comment = self.create_comment_attribution(user_id, comment)
        self.get_client().update_work_item(self.instance, issue_id, comment=quoted_comment)

    def create_comment_attribution(self, user_id, comment_text):
        # VSTS uses markdown or xml
        # https://docs.microsoft.com/en-us/microsoftteams/platform/concepts/bots/bots-text-formats
        user = User.objects.get(id=user_id)
        attribution = "%s wrote:\n\n" % user.name
        quoted_comment = "%s<blockquote>%s</blockquote>" % (attribution, comment_text)
        return quoted_comment

    def update_comment(self, issue_id, user_id, external_comment_id, comment_text):
        # Azure does not support updating comments
        pass
| |
from copy import copy
from datetime import datetime
import json
import string
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
import commonware.log
import jinja2
from tower import ugettext as _
from uuidfield.fields import UUIDField
from olympia import amo
from olympia.amo.models import ModelBase, ManagerBase
from olympia.access.models import Group
from olympia.addons.models import Addon
from olympia.bandwagon.models import Collection
from olympia.files.models import File
from olympia.reviews.models import Review
from olympia.tags.models import Tag
from olympia.translations.fields import save_signal, TranslatedField
from olympia.users.helpers import user_link
from olympia.users.models import UserProfile
from olympia.versions.models import Version
log = commonware.log.getLogger('devhub')
class RssKey(models.Model):
    # Secret key authenticating a private RSS feed, scoped to either one
    # add-on or one user (both FKs are unique, so at most one key each).
    key = UUIDField(db_column='rsskey', auto=True, unique=True)
    addon = models.ForeignKey(Addon, null=True, unique=True)
    user = models.ForeignKey(UserProfile, null=True, unique=True)
    # Date the key was issued (datetime.now evaluated at row creation).
    created = models.DateField(default=datetime.now)
    class Meta:
        db_table = 'hubrsskeys'
class BlogPost(ModelBase):
    # A blog post entry: title, posting date and permalink.
    title = models.CharField(max_length=255)
    date_posted = models.DateField(default=datetime.now)
    permalink = models.CharField(max_length=255)
    class Meta:
        db_table = 'blogposts'
class HubPromo(ModelBase):
    # Promotional block with translated heading/body and an audience toggle.
    VISIBILITY_CHOICES = (
        (0, 'Nobody'),
        (1, 'Visitors'),
        (2, 'Developers'),
        (3, 'Visitors and Developers'),
    )
    heading = TranslatedField()
    body = TranslatedField()
    visibility = models.SmallIntegerField(choices=VISIBILITY_CHOICES)
    class Meta:
        db_table = 'hubpromos'
    def __unicode__(self):
        return unicode(self.heading)
    def flush_urls(self):
        # Cache invalidation pattern: any devhub URL may show this promo.
        return ['*/developers*']
# Save translated fields before the promo row itself is saved.
models.signals.pre_save.connect(save_signal, sender=HubPromo,
                                dispatch_uid='hubpromo_translations')
class HubEvent(ModelBase):
    # An event listing: name, external URL, location and date.
    name = models.CharField(max_length=255, default='')
    url = models.URLField(max_length=255, default='')
    location = models.CharField(max_length=255, default='')
    date = models.DateField(default=datetime.now)
    class Meta:
        db_table = 'hubevents'
    def __unicode__(self):
        return self.name
    def flush_urls(self):
        # Cache invalidation pattern: any devhub URL may show this event.
        return ['*/developers*']
class AddonLog(ModelBase):
    """
    This table is for indexing the activity log by addon.
    """
    # Join row: one (addon, activity_log) pair, newest first.
    addon = models.ForeignKey(Addon)
    activity_log = models.ForeignKey('ActivityLog')
    class Meta:
        db_table = 'log_activity_addon'
        ordering = ('-created',)
class CommentLog(ModelBase):
    """
    This table is for indexing the activity log by comment.
    """
    activity_log = models.ForeignKey('ActivityLog')
    # The comment text itself is denormalized here for lookup.
    comments = models.CharField(max_length=255)
    class Meta:
        db_table = 'log_activity_comment'
        ordering = ('-created',)
class VersionLog(ModelBase):
    """
    This table is for indexing the activity log by version.
    """
    # Join row: one (activity_log, version) pair, newest first.
    activity_log = models.ForeignKey('ActivityLog')
    version = models.ForeignKey(Version)
    class Meta:
        db_table = 'log_activity_version'
        ordering = ('-created',)
class UserLog(ModelBase):
    """
    This table is for indexing the activity log by user.

    Note: This includes activity performed unto the user.
    """
    activity_log = models.ForeignKey('ActivityLog')
    user = models.ForeignKey(UserProfile)
    class Meta:
        db_table = 'log_activity_user'
        ordering = ('-created',)
class GroupLog(ModelBase):
    """
    This table is for indexing the activity log by access group.
    """
    # Join row: one (activity_log, group) pair, newest first.
    activity_log = models.ForeignKey('ActivityLog')
    group = models.ForeignKey(Group)
    class Meta:
        db_table = 'log_activity_group'
        ordering = ('-created',)
class ActivityLogManager(ManagerBase):
    """Query helpers for ActivityLog, mostly via the *Log join tables."""
    def for_addons(self, addons):
        """Return log entries indexed (via AddonLog) to the given addon(s)."""
        if isinstance(addons, Addon):
            addons = (addons,)
        vals = (AddonLog.objects.filter(addon__in=addons)
                .values_list('activity_log', flat=True))
        # NOTE(review): truthiness of a queryset executes a COUNT query here.
        if vals:
            return self.filter(pk__in=list(vals))
        else:
            return self.none()
    def for_version(self, version):
        """Return log entries indexed (via VersionLog) to the version."""
        vals = (VersionLog.objects.filter(version=version)
                .values_list('activity_log', flat=True))
        return self.filter(pk__in=list(vals))
    def for_group(self, group):
        """Return log entries indexed (via GroupLog) to the access group."""
        return self.filter(grouplog__group=group)
    def for_user(self, user):
        """Return log entries indexed (via UserLog) to the user."""
        vals = (UserLog.objects.filter(user=user)
                .values_list('activity_log', flat=True))
        return self.filter(pk__in=list(vals))
    def for_developer(self):
        # Hide admin-only and developer-hidden actions.
        return self.exclude(action__in=amo.LOG_ADMINS + amo.LOG_HIDE_DEVELOPER)
    def admin_events(self):
        return self.filter(action__in=amo.LOG_ADMINS)
    def editor_events(self):
        return self.filter(action__in=amo.LOG_EDITORS)
    def review_queue(self):
        """Review-queue actions, excluding those made by the task user."""
        qs = self._by_type()
        return (qs.filter(action__in=amo.LOG_REVIEW_QUEUE)
                .exclude(user__id=settings.TASK_USER_ID))
    def beta_signed_events(self):
        """List of all the auto signatures of beta files."""
        return self.filter(action__in=[
            amo.LOG.BETA_SIGNED_VALIDATION_PASSED.id,
            amo.LOG.BETA_SIGNED_VALIDATION_FAILED.id])
    def total_reviews(self, theme=False):
        """Return the top users, and their # of reviews."""
        qs = self._by_type()
        return (qs.values('user', 'user__display_name', 'user__username')
                .filter(action__in=([amo.LOG.THEME_REVIEW.id] if theme
                                    else amo.LOG_REVIEW_QUEUE))
                .exclude(user__id=settings.TASK_USER_ID)
                .annotate(approval_count=models.Count('id'))
                .order_by('-approval_count'))
    def monthly_reviews(self, theme=False):
        """Return the top users for the month, and their # of reviews."""
        qs = self._by_type()
        now = datetime.now()
        # First day of the current month (naive local time).
        created_date = datetime(now.year, now.month, 1)
        return (qs.values('user', 'user__display_name', 'user__username')
                .filter(created__gte=created_date,
                        action__in=([amo.LOG.THEME_REVIEW.id] if theme
                                    else amo.LOG_REVIEW_QUEUE))
                .exclude(user__id=settings.TASK_USER_ID)
                .annotate(approval_count=models.Count('id'))
                .order_by('-approval_count'))
    def user_approve_reviews(self, user):
        """All review-queue log entries made by *user*."""
        qs = self._by_type()
        return qs.filter(action__in=amo.LOG_REVIEW_QUEUE, user__id=user.id)
    def current_month_user_approve_reviews(self, user):
        now = datetime.now()
        ago = datetime(now.year, now.month, 1)
        return self.user_approve_reviews(user).filter(created__gte=ago)
    def user_position(self, values_qs, user):
        """1-based rank of *user* within a ranked values queryset, or None."""
        try:
            return next(i for (i, d) in enumerate(list(values_qs))
                        if d.get('user') == user.id) + 1
        except StopIteration:
            return None
    def total_reviews_user_position(self, user, theme=False):
        return self.user_position(self.total_reviews(theme), user)
    def monthly_reviews_user_position(self, user, theme=False):
        return self.user_position(self.monthly_reviews(theme), user)
    def _by_type(self):
        """Base queryset joined against the addon-log table via raw SQL."""
        qs = super(ActivityLogManager, self).get_query_set()
        table = 'log_activity_addon'
        return qs.extra(
            tables=[table],
            where=['%s.activity_log_id=%s.id'
                   % (table, 'log_activity')])
class SafeFormatter(string.Formatter):
    """A str.format drop-in replacement that HTML-escapes every
    interpolated value before it reaches the template."""
    def get_field(self, *args, **kw):
        # ``value`` is whatever is about to be interpolated into the
        # string; escape it so markup in the data cannot be injected.
        value, used_key = super(SafeFormatter, self).get_field(*args, **kw)
        return jinja2.escape(value), used_key
class ActivityLog(ModelBase):
    """One activity-log entry: an action id, the acting user and the
    JSON-serialized action arguments/details."""
    # (action id, action name) choices derived from the amo.LOG registry.
    TYPES = sorted([(value.id, key) for key, value in amo.LOG.items()])
    user = models.ForeignKey('users.UserProfile', null=True)
    action = models.SmallIntegerField(choices=TYPES, db_index=True)
    # Raw JSON blobs; use the ``arguments``/``details`` properties instead.
    _arguments = models.TextField(blank=True, db_column='arguments')
    _details = models.TextField(blank=True, db_column='details')
    objects = ActivityLogManager()
    formatter = SafeFormatter()
    class Meta:
        db_table = 'log_activity'
        ordering = ('-created',)
    def f(self, *args, **kw):
        """Calls SafeFormatter.format and returns a Markup string."""
        # SafeFormatter escapes everything so this is safe.
        return jinja2.Markup(self.formatter.format(*args, **kw))
    @property
    def arguments(self):
        """Deserialize ``_arguments`` into model instances and scalars.

        Returns None when the stored JSON cannot be parsed.
        """
        try:
            # d is a structure:
            # ``d = [{'addons.addon':12}, {'addons.addon':1}, ... ]``
            d = json.loads(self._arguments)
        # NOTE(review): bare except swallows every error type, not just
        # JSON decode errors.
        except:
            log.debug('unserializing data from addon_log failed: %s' % self.id)
            return None
        objs = []
        for item in d:
            # item has only one element.
            # (Python 2: dict.items() returns a list, hence the [0] index.)
            model_name, pk = item.items()[0]
            if model_name in ('str', 'int', 'null'):
                objs.append(pk)
            else:
                (app_label, model_name) = model_name.split('.')
                model = models.loading.get_model(app_label, model_name)
                # Cope with soft deleted models and unlisted addons.
                objs.extend(model.get_unfiltered_manager().filter(pk=pk))
        return objs
    @arguments.setter
    def arguments(self, args=[]):
        """
        Takes an object or a tuple of objects and serializes them and stores it
        in the db as a json string.
        """
        if args is None:
            args = []
        if not isinstance(args, (list, tuple)):
            args = (args,)
        serialize_me = []
        for arg in args:
            if isinstance(arg, basestring):
                serialize_me.append({'str': arg})
            elif isinstance(arg, (int, long)):
                serialize_me.append({'int': arg})
            elif isinstance(arg, tuple):
                # Instead of passing an addon instance you can pass a tuple:
                # (Addon, 3) for Addon with pk=3
                serialize_me.append(dict(((unicode(arg[0]._meta), arg[1]),)))
            else:
                # Model instance: store {'<app>.<model>': pk}.
                serialize_me.append(dict(((unicode(arg._meta), arg.pk),)))
        self._arguments = json.dumps(serialize_me)
    @property
    def details(self):
        # Implicitly returns None when no details were stored.
        if self._details:
            return json.loads(self._details)
    @details.setter
    def details(self, data):
        self._details = json.dumps(data)
    @property
    def log(self):
        """The amo.LOG action object for this entry's action id."""
        return amo.LOG_BY_ID[self.action]
    def to_string(self, type_=None):
        """Render the entry as an HTML-safe, human-readable string.

        ``type_`` selects an alternative '<type_>_format' template on the
        action, when the action defines one.
        """
        log_type = amo.LOG_BY_ID[self.action]
        if type_ and hasattr(log_type, '%s_format' % type_):
            format = getattr(log_type, '%s_format' % type_)
        else:
            format = log_type.format
        # We need to copy arguments so we can remove elements from it
        # while we loop over self.arguments.
        arguments = copy(self.arguments)
        addon = None
        review = None
        version = None
        collection = None
        tag = None
        group = None
        file_ = None
        # Pull the first instance of each known model out of the argument
        # list; whatever remains is passed positionally to the template.
        for arg in self.arguments:
            if isinstance(arg, Addon) and not addon:
                if arg.is_listed:
                    addon = self.f(u'<a href="{0}">{1}</a>',
                                   arg.get_url_path(), arg.name)
                else:
                    # Unlisted addons have no public page to link to.
                    addon = self.f(u'{0}', arg.name)
                arguments.remove(arg)
            if isinstance(arg, Review) and not review:
                review = self.f(u'<a href="{0}">{1}</a>',
                                arg.get_url_path(), _('Review'))
                arguments.remove(arg)
            if isinstance(arg, Version) and not version:
                text = _('Version {0}')
                if arg.is_listed:
                    version = self.f(u'<a href="{1}">%s</a>' % text,
                                     arg.version, arg.get_url_path())
                else:
                    version = self.f(text, arg.version)
                arguments.remove(arg)
            if isinstance(arg, Collection) and not collection:
                collection = self.f(u'<a href="{0}">{1}</a>',
                                    arg.get_url_path(), arg.name)
                arguments.remove(arg)
            if isinstance(arg, Tag) and not tag:
                if arg.can_reverse():
                    tag = self.f(u'<a href="{0}">{1}</a>',
                                 arg.get_url_path(), arg.tag_text)
                else:
                    tag = self.f('{0}', arg.tag_text)
            if isinstance(arg, Group) and not group:
                group = arg.name
                arguments.remove(arg)
            if isinstance(arg, File) and not file_:
                validation = 'passed'
                if self.action == amo.LOG.BETA_SIGNED_VALIDATION_FAILED.id:
                    validation = 'failed'
                file_ = self.f(u'<a href="{0}">{1}</a> (validation {2})',
                               reverse('files.list', args=[arg.pk]),
                               arg.filename,
                               validation)
                arguments.remove(arg)
        user = user_link(self.user)
        try:
            kw = dict(addon=addon, review=review, version=version,
                      collection=collection, tag=tag, user=user, group=group,
                      file=file_)
            return self.f(format, *arguments, **kw)
        except (AttributeError, KeyError, IndexError):
            # Malformed stored arguments; degrade gracefully.
            log.warning('%d contains garbage data' % (self.id or 0))
            return 'Something magical happened.'
    def __unicode__(self):
        return self.to_string()
    def __html__(self):
        return self
class SubmitStep(models.Model):
    # Current step number of the submission flow for an add-on.
    addon = models.ForeignKey(Addon)
    step = models.IntegerField()
    class Meta:
        db_table = 'submit_step'
| |
import sys
import maya.cmds as cmds #@UnresolvedImport
import maya.OpenMaya as OpenMaya #@UnresolvedImport
VERSION = 1.0
'''
========================================================================
----> Creates and removes callbacks <----
========================================================================
'''
class Maya_Callback():
    """
    Registers and removes Maya scene / attribute callbacks around a single
    user-supplied procedure.

    ----> Examples <----
    import maya.cmds as cmds
    import callback_util
    # Example proc
    def print_all_nodes(*args):
        print cmds.ls('*')
        print len(cmds.ls('*'))
    # Create callback instance
    callback_obj = callback_util.Callback((print_all_nodes))
    # Run proc before scene updates
    callback_obj.scene_update_before()
    # Run proc after scene updates
    callback_obj.scene_update_after()
    # Adds more maya node
    for i in range(20):
        cmds.spaceLocator()
    # Open new Maya scene
    cmds.file (f=True, new=True)
    # Removes callback from instance
    callback_obj.remove()

    *Author:*
        * nick.silveira, Nicholas.Silveira@gmail.com, Jun 15, 2013 7:39:20 PM
    """
    def __init__( self, procedure, window = None ):
        # Get callback procedure
        self.procedure = procedure
        # Get callbacks main window; when set, callbacks auto-remove once the
        # window/dock no longer exists (see window_remove).
        self.window = window
        # Initiate callback list (stores MCallbackId values for later removal)
        self.callback_list = []
    '''
    ========================================================================
    ----> Callback runs passed procedure when scene updates <----
    ========================================================================
    '''
    def scene_update_before( self ):
        """
        Registers self.procedure to run before scene-changing events
        (new, import, open, reference add/remove).

        ----> Examples <----
        import maya.cmds as cmds
        import callback_util
        def create_locator( *args ):
            cmds.spaceLocator()
        callback_obj = callback_util.Callback( ( create_locator ) )
        callback_obj.scene_update_before()
        """
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kBeforeNew, self.run_callback ) )
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kBeforeImport, self.run_callback ) )
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kBeforeOpen, self.run_callback ) )
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kBeforeReference, self.run_callback ) )
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kBeforeRemoveReference, self.run_callback ) )
    '''
    ========================================================================
    ----> Callback runs passed procedure when scene updates <----
    ========================================================================
    '''
    def scene_update_after( self ):
        """
        Registers self.procedure to run after scene-changing events
        (new, import, open, reference add/remove).

        ----> Examples <----
        import maya.cmds as cmds
        import callback_util
        def create_locator( *args ):
            cmds.spaceLocator()
        callback_obj = callback_util.Callback( ( create_locator ) )
        callback_obj.scene_update_after()
        """
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kAfterNew, self.run_callback ) )
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kAfterImport, self.run_callback ) )
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kAfterOpen, self.run_callback ) )
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kAfterReference, self.run_callback ) )
        self.callback_list.append( OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kAfterRemoveReference, self.run_callback ) )
    '''
    ========================================================================
    ----> Changed Attribute Callback <----
    ========================================================================
    '''
    def add_attr( self, obj_name, attr_name ):
        """
        Registers self.procedure to run when the named attribute on the named
        object changes.

        *Arguments:*
            * ``obj_name`` Pass object name
            * ``attr_name`` Pass attribute name

        *Examples:* ::
            # Import Python modules
            import sys
            # Import Maya modules
            import maya.cmds as cmds
            # Import Callback module
            import maya_callback
            reload(maya_callback)
            # Global Variables
            locator_name = 'callback_loc'
            attr_name = 'translate'
            # Print out 'Attribute Callback Works!'
            def print_something( *args ):
                sys.stdout.write( '// Result: Attribute Callback Works!' )
            # Create space locator
            locator = cmds.spaceLocator( n = locator_name )[0]
            # Create callback & add proc
            callback = maya_callback.Maya_Callback( ( print_something ) )
            # Add attribute callback
            callback.add_attr( locator, attr_name )
            # Remove callback
            callback.remove()
        """
        self.obj_name = obj_name
        self.attr_name = attr_name
        # Preserve the user's selection while resolving the node's MObject.
        sel = cmds.ls( sl = True )
        cmds.select( obj_name )
        node = self.get_mobject( obj_name )
        MSelectionList = OpenMaya.MSelectionList()
        OpenMaya.MGlobal.getActiveSelectionList( MSelectionList )
        MSelectionList.getDependNode( 0, node )
        self.callback_list.append( OpenMaya.MNodeMessage.addAttributeChangedCallback( node, self.run_add_attr, None ) )
        cmds.select( sel )
    '''
    ========================================================================
    ----> Run Changed Attribute Callback <----
    ========================================================================
    '''
    def run_add_attr( self, message, m_obj, *args ):
        """
        Filter for attribute-changed events; fires self.procedure only for
        the object/attribute registered in add_attr.

        *Arguments:*
            * ``message`` Callback passes a message ID
            * ``m_obj`` Callback passes MObject
        """
        node_name, attr_name = m_obj.name().split( '.' )
        # 2056 filters for the attribute-set event mask — presumably the
        # MNodeMessage attribute-set flag combination; verify against the
        # OpenMaya API docs for the Maya version in use.
        if message == 2056:
            if node_name == self.obj_name:
                if attr_name == self.attr_name:
                    self.run_callback()
    '''
    ========================================================================
    ----> Runs passed proc and removes ui callbacks if ui dosn't exist <----
    ========================================================================
    '''
    def run_callback( self, *args ):
        # When bound to a window, first verify the window still exists;
        # window_remove returns False after tearing down dead callbacks.
        if self.window:
            run_proc = self.window_remove()
        else:
            run_proc = True
        if run_proc:
            self.procedure()
    '''
    ========================================================================
    ----> Callback runs passed procedure when scene updates <----
    ========================================================================
    '''
    def window_remove( self, *args ):
        # If neither the window nor its dock control exists anymore, remove
        # every registered callback and report False (do not run the proc).
        if not cmds.window( self.window, exists = True, q = True ) and not cmds.dockControl( self.window, vis = True, q = True ):
            for callback in self.callback_list:
                OpenMaya.MMessage.removeCallback( callback )
            sys.stdout.write( '// Removed {0} callbacks!'.format( self.window ) )
            return False
        return True
    '''
    ========================================================================
    ----> Removes all callbacks in instance <----
    ========================================================================
    '''
    def remove( self ):
        # Deregister every callback id collected by this instance.
        for callback in self.callback_list:
            OpenMaya.MMessage.removeCallback( callback )
    def get_mobject( self, name ):
        """Return the MObject dependency node for the named scene object."""
        selectionList = OpenMaya.MSelectionList()
        selectionList.add( name )
        node = OpenMaya.MObject()
        selectionList.getDependNode( 0, node )
        return node
    '''
    ========================================================================
    ----> Code Sample <----
    ========================================================================
    '''
    def code_sample( self, *args ):
        """Show a scrollable window containing an example usage snippet."""
        code = '''
        import maya.cmds as cmds
        import callback_util
        # Example proc
        def print_all_nodes(*args):
            print cmds.ls('*')
            print len(cmds.ls('*'))
        # Create callback instance
        callback_obj = callback_util.Callback((print_all_nodes))
        # Run proc before scene updates
        callback_obj.scene_update_before()
        # Run proc after scene updates
        callback_obj.scene_update_after()
        # Adds more maya node
        for i in range(20):
            cmds.spaceLocator()
        # Open new Maya scene
        cmds.file (f=True, new=True)
        # Removes callback from instance
        callback_obj.remove()
        '''
        if cmds.window( 'code_sample_window', exists = True, q = True ):
            cmds.deleteUI( 'code_sample_window' )
        cmds.window( 'code_sample_window', title = 'Code Sample' )
        cmds.paneLayout()
        cmds.scrollField( editable = False, text = code.replace( '	', '' ) )
        cmds.showWindow()
| |
# -*- coding: utf-8 -*-
"""
This is a neural network model of the development of reduplicated canonical
babbling in human infancy.
This is a modification of Izhikevich's (2007 Cerebral Cortex) daspnet.m and of
a previous mode described in Warlaumont (2012, 2013 ICDL-EpiRob) and Warlaumont
& Finnegan (2016 PLOS ONE). Code from those papers was written in MATLAB. This
is a rewriting of the 2016 code in Python.
Vocal tract simulation is performed in Praat (so you must have Praat installed
for this to run).
This version currently only supports human reinforcement. In the MATLAB version
automatic salience-based reinforcement using a modified version of Coath et
al.'s (2009) auditory salience algorithms, written in MATLAB, was also an
option.
Anne S. Warlaumont
warlaumont@ucla.edu or anne.warlaumont@gmail.com
http://www.annewarlaumont.org
For updates, see https://github.com/AnneSWarlaumont/BabbleNN
"""
# Commented out for debugging:
# def sim(simid,path,T,reinforcer,muscscale,yoke,plotOn):
"""
Starts or restarts a simulation
simid: a unique identifier for this simulation. Should not contain spaces.
path: path to the directory where your sim data should be saved. No slash
at the end.
T: the length of time the experiment is to run in seconds. This can be
changed to a longer or shorter value when a simulation is restarted
reinforcer: the type of reinforcement. For now, must be 'human'.
muscscale: this scales the activity sent to Praat. 4 is the recommended
value
yoke: indicates whether to run an experiment or a yoked control simulation.
Set to False to run a regular simulation. Set to True to run a
yoked control. There must already have been a simulation of the same
id run, with its data on the path, for the simulation to yoke to.
plotOn: enables plots of several simulation parameters. Set to False to
disable plots and to True to enable.
Example use: sim('Mortimer','/Users/awarlau/Downloads','7200,'human',4,
False,False)
"""
#Temporary, for debugging:
simid = 'Mortimer'
path = '/Users/awarlau/Downloads'
T = 60 * 30 # sec * min * hr
reinforcer = 'relhipos' # 'agonist_spike' # 'relhipos' # # 'sumsmoothmusc>0'
thresh = 0
threshinc = 5
temprewhistlen = 20
muscscale = 4
yoke = False
plotOn = True
soutscale = 100
STDPadd = 1000
import os, numpy as np
DAinc = 1 # amount of dopamine given during reward
M = 100 # number of synapses per neuron
Ne = 800 # number of excitatory reservoir neurons
Ni = 200 # number of inhibitory reservoir neurons
N = Ne + Ni # total number of reservoir neurons
Nout = 200 # number of reservoir output neurons
Nmot = Nout # number of motor neurons
a = np.concatenate((0.02 * np.ones((Ne)), 0.1 * np.ones((Ni))))
# time scales of the membrane recovery variable for reservoir neurons
d = np.concatenate((8 * np.ones((Ne)), 2 * np.ones((Ni))))
# membrane recovery variable after-spike shift for reservoir neurons
a_mot = 0.02 * np.ones((Nmot))
# time scales of the membrane recovery variable for motor neurons
d_mot = 8 * np.ones((Nmot))
# membrane recovery variable after-spike shift for motor neurons
post = np.floor(np.concatenate(
    (N * np.random.rand(Ne,M), Ne * np.random.rand(Ni,M))))
# Assign the postsynaptic neurons for each reservoir neuron
# (inhibitory neurons project only onto excitatory neurons)
post_mot = np.repeat(np.arange(Nmot).transpose(),Nout,0)
# all output neurons connect to all motor neurons
s = np.concatenate((np.random.rand(Ne,M),-1 * np.random.rand(Ni,M)))
# synaptic weights within the reservoir
sout = np.random.rand(Nout,Nmot) # synaptic weights from output to motor
sout = soutscale * sout / np.mean(sout) # normalize sout
sd = np.zeros((Nout,Nmot)) # will store the changes to be made to sout
STDP = np.zeros(Nout) # presynaptic (output neuron) spike traces
v = -65 * np.ones((N)) # reservoir membrane potentials
v_mot = -65 * np.ones((Nmot)) # motor neuron membrane potentials
u = 0.2 * v # reservoir membrane recovery variables
u_mot = 0.2 * v_mot # motor neuron membrane recovery variables
firings = [] # reservoir neuron firings for the current second
outFirings = [] # output neuron firings for the current second
motFirings = [] # motor neuron firings for the current second
DA = 0 # level of dopamine above baseline
muscsmooth = 100 # spike train data smoothed by 100 ms moving average
sec = 0 # current time in the simulation
rew = [] # track when rewards were received
hist_sumsmoothmusc = [] # keep a record of sumsmoothmusc after each second
# Initialize reward policy variables:
if reinforcer == 'relhipos':
    temprewhist = [False] * temprewhistlen # Keeps track, for the last
    # temprewhistlen sounds, of when the threshold for reward was exceeded
    rewcount = 0
# Absolute path where Praat can be found
praatPathmac = '/Applications/Praat.app/Contents/MacOS/Praat'
# Set data directory names:
wavdir = path + '/' + simid + '_Wav'
firingsdir = path + '/' + simid + '_Firings'
# Create data directories:
if os.path.isdir(wavdir) != True:
    os.mkdir(wavdir)
if os.path.isdir(firingsdir) != True:
    os.mkdir(firingsdir)
# Begin the simulation!
# Outer loop: one iteration per simulated second; inner loop: 1 ms steps.
for sec in range(sec,T):
    print('********************************************')
    print('Second ' + str(sec+1) + ' of ' + str(T))
    # Reset firings
    firings = []
    outFirings = []
    motFirings = []
    for t in range(0,1000): # millisecond timesteps
        # give random input to reservoir and motor neurons:
        I = 13 * (np.random.rand(N))
        I_mot = 13 * (np.random.rand(Nmot))
        # get the indices of fired neurons:
        fired = v >= 30
        fired_out = v[0:Nout] >= 30
        fired_mot = v_mot >= 30
        # reset the voltages for the neurons that fired:
        v[fired] = -65
        v_mot[fired_mot] = -65
        # individual neuron dynamics:
        u[fired] = u[fired] + d[fired]
        u_mot[fired_mot] = u_mot[fired_mot] + d_mot[fired_mot]
        # spike-timing dependent plasticity computations:
        STDP[fired_out] = STDPadd # record output neuron (i.e.
                                  # presynaptic neuron)spike times.
        for k in range(0,Nmot):
            if fired_mot[k]:
                sd[:,k] = sd[:,k] + STDP # adjust sd for potentiation-eligible
                                         # synapses
                motFirings.append([t,k]) # update records of when motor
                                         # neurons fired
        for k in range(0,Nout):
            if fired_out[k]:
                outFirings.append([t,k]) # update the records of when
                                         # output neurons fired
        for k in range(0,N):
            if fired[k]:
                firings.append([t,k]) # update the records of when
                                      # reservoir neurons fired
        # For any presynaptic neuron that fired, calculate the input
        # current to add to each of its postsynaptic neurons as
        # proportional to the synaptic strength from the presynaptic to
        # the postsynaptic neuron:
        for k in range(0,len(firings)):
            if firings[k][0] > t-1:
                for l in range(0,np.size(post,1)):
                    postnum = int(post[firings[k][1], l])
                    I[postnum] = I[postnum] + s[firings[k][1], l]
        # Calculate the currents to add to the motor neurons:
        for k in range(0,len(outFirings)):
            if outFirings[k][0] > t:
                for l in range(0,np.size(post_mot,1)):
                    postnum = int(post_mot[outFirings[k][1], l])
                    I_mot[postnum] = I_mot[postnum] + 2 * sout[outFirings[k][1], l]
        # Individual neuronal dynamics computations (for numerical
        # stability the time step is 0.5 ms; hence each update is applied
        # twice per millisecond)
        v = v + 0.5 * ((0.04 * v + 5) * v + 140 - u + I)
        v = v + 0.5 * ((0.04 * v + 5) * v + 140 - u + I)
        v_mot = v_mot + 0.5 * (
            (0.04 * v_mot + 5) * v_mot + 140 - u_mot + I_mot)
        v_mot = v_mot + 0.5 * (
            (0.04 * v_mot + 5) * v_mot + 140 - u_mot + I_mot)
        u = u + a * (0.2 * v - u)
        u_mot = u_mot + a_mot * (0.2 * v_mot - u_mot)
        # Exponential decay of the traces of presynaptic neuron firing
        # with tau = 20 ms
        STDP = 0.95 * STDP
        # Exponential decay of the dopamine concentration over time
        DA = DA * 0.995
        # Every 10 ms, modify synaptic weights:
        if (t + 1) % 10 == 0:
            prevsout = sout # for debugging
            # Dopamine-gated weight update, clipped at zero then renormalized.
            sout = np.maximum(0, sout + DA * sd)
            sout = soutscale * sout / np.mean(sout) # normalize
            sd = 0.99 * sd # The eligibility trace decays exponentially
        # evaluate the model and maybe give DA:
        # initialize second-long records of agonist and antagonist spikes
        if t == 0:
            numfiredmusc1pos = -1 * np.ones(1000)
            numfiredmusc1neg = -1 * np.ones(1000)
            smoothmuscpos = -1 * np.ones(1000 - muscsmooth)
            smoothmuscneg = -1 * np.ones(1000 - muscsmooth)
            smoothmusc = -1 * np.ones(1000 - muscsmooth)
        # Find out which of the agonist and antagonist jaw/lip motor
        # neurons fired this ms (first half = agonist, second = antagonist):
        numfiredmusc1pos[t] = sum(v_mot[0:int(Nmot/2)] >= 30)
        numfiredmusc1neg[t] = sum(v_mot[int(Nmot/2):Nmot] >= 30)
        if reinforcer == 'agonist_spike':
            # Reward every millisecond in which any agonist neuron fired.
            if numfiredmusc1pos[t] > 0:
                rew.append(sec*1000+t)
        if t == 999:
            # Create a moving average of the summed spikes:
            # NOTE(review): the slice [smootht-muscsmooth+1:smootht] excludes
            # index smootht, so the window covers muscsmooth-1 samples —
            # confirm against the MATLAB original before changing.
            for smootht in range(muscsmooth - 1,999):
                smoothmuscpos[smootht-muscsmooth+1] = np.mean(
                    numfiredmusc1pos[smootht-muscsmooth+1:smootht])
                smoothmuscneg[smootht-muscsmooth+1] = np.mean(
                    numfiredmusc1neg[smootht-muscsmooth+1:smootht])
            smoothmusc = muscscale * (smoothmuscpos - smoothmuscneg)
            sumsmoothmusc = sum(smoothmusc)
            hist_sumsmoothmusc.append(sumsmoothmusc)
            if reinforcer == 'human':
                print('sum(smoothmusc): ' + str(sum(smoothmusc)))
                decision = input('Reward the model? Press y or n:\n')
                if decision == 'y':
                    rew.append(sec*1000+t)
            elif reinforcer == 'sumsmoothmusc>0':
                print('sumsmoothmusc: ' + str(sumsmoothmusc))
                if sumsmoothmusc > 0:
                    print('rewarded')
                    rew.append(sec*1000+t)
            elif reinforcer == 'relhipos':
                # Adaptive threshold policy: reward when summed smoothed
                # muscle activity beats the current threshold; raise the
                # threshold when rewards occur in >= half of the recent
                # history window.
                print('sumsmoothmusc: ' + str(sumsmoothmusc))
                print('threshold: ' + str(thresh))
                temprewhist[0:temprewhistlen-1] = temprewhist[1:temprewhistlen]
                if sumsmoothmusc > thresh:
                    print('rewarded')
                    rew.append(sec*1000+t)
                    rewcount = rewcount + 1
                    temprewhist[temprewhistlen-1] = True
                    if sum(temprewhist)>=(.5 * temprewhistlen):
                        thresh = thresh + threshinc
                        temprewhist = [False] * temprewhistlen
                else:
                    temprewhist[temprewhistlen-1] = False
                print('sum(temprewhist): ' + str(sum(temprewhist)))
            if sec >= temprewhistlen:
                print(str(temprewhistlen) + ' s avg summsoothmusc: ' +
                      str(np.mean(np.array(hist_sumsmoothmusc[sec-
                      temprewhistlen+1:sec+1]))))
        # Deliver dopamine when this millisecond was marked for reward.
        if sec*1000+t in rew:
            DA = DA + DAinc
# Post-run summary: mean muscle activity over the first/last 10% and 50%
# of the run, and mean agonist vs. antagonist output weights.
print(round(.1 * T))
print(np.mean(np.array(hist_sumsmoothmusc[0:round(.1 * T)])))
print(np.mean(np.array(hist_sumsmoothmusc[sec-round(.1 * T):sec])))
print(np.mean(np.array(hist_sumsmoothmusc[0:round(.5 * T)])))
print(np.mean(np.array(hist_sumsmoothmusc[sec-round(.5 * T):sec])))
print(np.mean(sout[:,0:int(Nmot/2)]))
print(np.mean(sout[:,int(Nmot/2):Nmot]))
| |
"""Helper functions for the NAPALM base."""
# Python3 support
from __future__ import print_function
from __future__ import unicode_literals
# std libs
import os
import sys
# third party libs
import jinja2
import jtextfsm as textfsm
from netaddr import EUI
from netaddr import mac_unix
from netaddr import IPAddress
# local modules
import napalm_base.exceptions
from napalm_base.utils.jinja_filters import CustomJinjaFilters
from napalm_base.utils import py23_compat
# ----------------------------------------------------------------------------------------------------------------------
# helper classes -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
class _MACFormat(mac_unix):
    # netaddr MAC dialect: UNIX-style colon separators with zero-padded,
    # upper-case hex words (set via word_fmt below).
    pass
_MACFormat.word_fmt = '%.2X'
# ----------------------------------------------------------------------------------------------------------------------
# callable helpers
# ----------------------------------------------------------------------------------------------------------------------
def load_template(cls, template_name, template_source=None, template_path=None,
                  openconfig=False, **template_vars):
    """Render a Jinja config template and load it as the driver's merge candidate.

    :param cls: Instance of the driver class.
    :param template_name: Template name (without the ``.j2`` extension), used
        when ``template_source`` is not provided.
    :param template_source: Optional raw template text; when it is a string it
        is rendered directly and no file lookup happens.
    :param template_path: Optional absolute path to a custom template dir; the
        driver module name is appended to it.
    :param openconfig: Look in the ``oc_templates`` dir instead of ``templates``.
    :param template_vars: Variables passed to the Jinja rendering context.
    :raises napalm_base.exceptions.DriverTemplateNotImplemented: template dir
        does not exist.
    :raises napalm_base.exceptions.TemplateNotImplemented: template file missing.
    :raises napalm_base.exceptions.TemplateRenderException: rendering failed.
    """
    try:
        if isinstance(template_source, py23_compat.string_types):
            template = jinja2.Template(template_source)
        else:
            current_dir = os.path.dirname(os.path.abspath(sys.modules[cls.__module__].__file__))
            if (isinstance(template_path, py23_compat.string_types) and
                    os.path.isdir(template_path) and os.path.isabs(template_path)):
                current_dir = os.path.join(template_path, cls.__module__.split('.')[-1])
                # append driver name at the end of the custom path
            if openconfig:
                template_dir_path = '{current_dir}/oc_templates'.format(current_dir=current_dir)
            else:
                template_dir_path = '{current_dir}/templates'.format(current_dir=current_dir)
            if not os.path.isdir(template_dir_path):
                raise napalm_base.exceptions.DriverTemplateNotImplemented(
                    '''Config template dir does not exist: {path}.
                    Please create it and add driver-specific templates.'''.format(
                        path=template_dir_path
                    )
                )
            loader = jinja2.FileSystemLoader(template_dir_path)
            environment = jinja2.Environment(loader=loader)
            # Register the custom NAPALM filters before rendering.
            for filter_name, filter_function in CustomJinjaFilters.filters().items():
                environment.filters[filter_name] = filter_function
            template = environment.get_template('{template_name}.j2'.format(
                template_name=template_name
            ))
        configuration = template.render(**template_vars)
    except jinja2.exceptions.TemplateNotFound:
        raise napalm_base.exceptions.TemplateNotImplemented(
            "Config template {template_name}.j2 is not defined under {path}".format(
                template_name=template_name,
                path=template_dir_path
            )
        )
    except (jinja2.exceptions.UndefinedError, jinja2.exceptions.TemplateSyntaxError) as jinjaerr:
        # BUG FIX: ``jinjaerr.message`` relied on BaseException.message, which
        # was removed in Python 3. Use the py23-compatible text conversion,
        # consistent with textfsm_extractor below.
        raise napalm_base.exceptions.TemplateRenderException(
            "Unable to render the Jinja config template {template_name}: {error}".format(
                template_name=template_name,
                error=py23_compat.text_type(jinjaerr)
            )
        )
    return cls.load_merge_candidate(config=configuration)
def textfsm_extractor(cls, template_name, raw_text):
    """
    Applies a TextFSM template over a raw text and return the matching table.

    Main usage of this method will be to extract data form a non-structured output
    from a network device and return the values in a table format.

    :param cls: Instance of the driver class
    :param template_name: Specifies the name of the template to be used
    :param raw_text: Text output as the devices prompts on the CLI
    :return: table-like list of entries
    :raises TemplateNotImplemented: the .tpl file does not exist.
    :raises TemplateRenderException: the .tpl file is malformed.
    """
    textfsm_data = list()
    # Templates live next to the driver module, under utils/textfsm_templates.
    current_dir = os.path.dirname(os.path.abspath(sys.modules[cls.__module__].__file__))
    template_dir_path = '{current_dir}/utils/textfsm_templates'.format(
        current_dir=current_dir
    )
    template_path = '{template_dir_path}/{template_name}.tpl'.format(
        template_dir_path=template_dir_path,
        template_name=template_name
    )
    try:
        # Use a context manager so the template file handle is always closed
        # (the previous implementation leaked it).
        with open(template_path) as template_file:
            fsm_handler = textfsm.TextFSM(template_file)
    except IOError:
        raise napalm_base.exceptions.TemplateNotImplemented(
            "TextFSM template {template_name}.tpl is not defined under {path}".format(
                template_name=template_name,
                path=template_dir_path
            )
        )
    except textfsm.TextFSMTemplateError as tfte:
        raise napalm_base.exceptions.TemplateRenderException(
            "Wrong format of TextFSM template {template_name}: {error}".format(
                template_name=template_name,
                error=py23_compat.text_type(tfte)
            )
        )
    # Map each parsed row onto the (lower-cased) header names.
    for record in fsm_handler.ParseText(raw_text):
        entry = {
            fsm_handler.header[index].lower(): entry_value
            for index, entry_value in enumerate(record)
        }
        textfsm_data.append(entry)
    return textfsm_data
def find_txt(xml_tree, path, default=''):
    """
    Return the first XPath match from an XML tree as text.

    The |default| is returned only when evaluating the XPath raises; an XPath
    that simply has no matches yields the empty string.

    :param xml_tree: the XML Tree object. Assumed is <type 'lxml.etree._Element'>.
    :param path: XPath to be applied, in order to extract the desired data.
    :param default: Value to be returned in case of error.
    :return: a str value.
    """
    value = ''
    try:
        matches = xml_tree.xpath(path)  # only the first match is considered
        if matches and matches[0] is not None:
            first_match = matches[0]
            if isinstance(first_match, type(xml_tree)):
                # Element nodes carry their payload in .text.
                value = first_match.text.strip()
            else:
                # Attribute/string/number results are used directly.
                value = first_match
    except Exception:  # in case of any exception, returns default
        value = default
    return py23_compat.text_type(value)
def convert(to, who, default=u''):
    """
    Converts data to a specific datatype.
    In case of error, will return a default value.

    :param to: datatype to be casted to.
    :param who: value to cast.
    :param default: value to return in case of error.
    :return: the converted value, or |default| when |who| is None or the
        cast fails.
    """
    if who is None:
        return default
    try:
        return to(who)
    # Narrowed from a bare except: KeyboardInterrupt/SystemExit must not be
    # swallowed by a best-effort cast.
    except Exception:
        return default
def mac(raw):
    """
    Convert a raw string to a standardised MAC Address EUI Format.

    :param raw: the raw string containing the value of the MAC Address
    :return: a string with the MAC Address in EUI format

    Example:

    .. code-block:: python

        >>> mac('0123.4567.89ab')
        u'01:23:45:67:89:AB'

    Some vendors (e.g. Cisco) may return truncated addresses such as
    a9:c5:2e:7b:6: which are not valid EUI48/EUI64; those are right-padded
    with zeros before parsing:

        >>> mac('a9:c5:2e:7b:6:')
        u'A9:C5:2E:7B:60:00'

    Other loose formats still work:

        >>> mac('123.4567.89ab')
        u'01:23:45:67:89:AB'
        >>> mac('23.4567.89ab')
        u'00:23:45:67:89:AB'
    """
    if raw.endswith(':'):
        # Flatten and right-pad to 12 hex digits with zeros.
        raw = raw.replace(':', '').ljust(12, '0')
    return py23_compat.text_type(EUI(raw, dialect=_MACFormat))
def ip(addr):
    """
    Converts a raw string to a valid IP address.

    Motivation: the groups of the IP addreses may contain leading zeros. IPv6 addresses can \
    contain sometimes uppercase characters. E.g.: 2001:0dB8:85a3:0000:0000:8A2e:0370:7334 has \
    the same logical value as 2001:db8:85a3::8a2e:370:7334. However, their values as strings are \
    not the same.

    :param addr: the raw string containing the value of the IP Address
    :return: a string containing the IP Address in a standard format (no leading zeros, \
    zeros-grouping, lowercase)

    Example:

    .. code-block:: python

        >>> ip('2001:0dB8:85a3:0000:0000:8A2e:0370:7334')
        u'2001:db8:85a3::8a2e:370:7334'
    """
    # netaddr's IPAddress performs the canonicalisation described above.
    return py23_compat.text_type(IPAddress(addr))
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
import ast
import datetime
import functools
import gc
import hashlib
import inspect
import os
import random
import sys
import time
import urllib.parse
import urllib.request
import weakref
import requests
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import retry
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
try:
import psutil
except ImportError:
psutil = None
# FIXME: Binary extensions list is still very basic.
BINARY_EXTENSIONS = [
    # Media formats.
    '.mp3',
    '.ogg',
    '.mp4',
    '.webm',
    # Image Formats.
    '.png',
    '.jpg',
    '.gif',
    # Misc.
    '.pdf',
    '.swf',
]
# Filename prefix used for fuzzer-generated artifacts; such files are not
# treated as valid testcases (see is_valid_testcase_file).
FUZZ_PREFIX = 'fuzz-'
# Extensions treated as text without inspecting file contents.
TEXT_EXTENSIONS = [
    '.css', '.js', '.htm', '.html', '.svg', '.xhtml', '.xht', '.xml', '.xsl'
]
# Retry policy for fetch_url.
URL_REQUEST_RETRIES = 5
URL_REQUEST_FAIL_WAIT = 1
# Windows extended-length path prefix (stripped before building file:// URLs).
WINDOWS_PREFIX_PATH = '\\\\?\\'
# Thread pool for use in function timeouts. Created lazily by timeout().
THREAD_POOL = None
# Relative path of the deployed source manifest holding the source revision.
LOCAL_SOURCE_MANIFEST = os.path.join('src', 'appengine', 'resources',
                                     'clusterfuzz-source.manifest')
def utcnow():
  """Return the current UTC time.

  Exists as a wrapper because built-in methods cannot be mocked in tests.
  """
  return datetime.datetime.utcnow()  # pragma: no cover.
def current_date_time():
  """Return the current UTC date/time as 'YYYY-MM-DD HH:MM:SS UTC'."""
  now = datetime.datetime.utcnow()
  return '{} UTC'.format(now.strftime('%Y-%m-%d %H:%M:%S'))
def utc_date_to_timestamp(date):
  """Convert a (UTC) datetime.date to seconds since the UNIX epoch."""
  epoch = datetime.date(1970, 1, 1)
  return (date - epoch).total_seconds()
def utc_datetime_to_timestamp(dt):
  """Converts a (UTC) datetime.datetime to a UNIX timestamp."""
  return (dt - datetime.datetime.utcfromtimestamp(0)).total_seconds()
def decode_to_unicode(obj):
  """Return |obj| decoded as UTF-8 text (undecodable bytes are dropped).

  Objects without a .decode method (already-text strings, ints, ...) are
  returned unchanged.
  """
  if hasattr(obj, 'decode'):
    return obj.decode('utf-8', errors='ignore')
  return obj
def encode_as_unicode(obj):
  """Return |obj| encoded to UTF-8 bytes.

  Objects without an .encode method (bytes, ints, ...) are returned unchanged.
  """
  if hasattr(obj, 'encode'):
    return obj.encode('utf-8')
  return obj
@retry.wrap(
    retries=URL_REQUEST_RETRIES,
    delay=URL_REQUEST_FAIL_WAIT,
    function='base.utils.fetch_url')
def fetch_url(url):
  """Fetch |url| and return the response body text (None on HTTP 404)."""
  operations_timeout = environment.get_value('URL_BLOCKING_OPERATIONS_TIMEOUT')
  response = requests.get(url, timeout=operations_timeout)
  if response.status_code == 404:
    # A missing resource is an expected outcome, not an error.
    return None
  # Any other non-2xx status is raised (and retried by the decorator).
  response.raise_for_status()
  return response.text
def fields_match(string_1,
                 string_2,
                 field_separator=':',
                 allow_empty_fields=True):
  """Compare two strings field-by-field, split on |field_separator|.

  With |allow_empty_fields| set, only the common-length prefix of fields is
  compared; otherwise the field counts must match exactly.
  """
  if string_1 is None or string_2 is None:
    return False
  if string_1 == string_2:
    return True

  fields_1 = string_1.split(field_separator)
  fields_2 = string_2.split(field_separator)
  if not allow_empty_fields and len(fields_1) != len(fields_2):
    return False

  # zip stops at the shorter list, mirroring the min-length comparison.
  return all(lhs == rhs for lhs, rhs in zip(fields_1, fields_2))
def file_path_to_file_url(path):
  """Return |path| as a file:// scheme URL ('' for an empty path)."""
  if not path:
    return ''
  # str.lstrip strips a *character set*, not a prefix: the previous
  # lstrip(WINDOWS_PREFIX_PATH) also ate the leading backslashes of UNC
  # paths. Remove the extended-length prefix only when actually present.
  if path.startswith(WINDOWS_PREFIX_PATH):
    path = path[len(WINDOWS_PREFIX_PATH):]

  return urllib.parse.urljoin('file:', urllib.request.pathname2url(path))
def filter_file_list(file_list):
  """Return |file_list| without duplicates, non-existent files and directories.

  Original order of the surviving entries is preserved. Logs when anything
  was filtered out.
  """
  filtered_file_list = []
  # Track already-seen paths in a set: the previous implementation re-scanned
  # (and re-normalized) the whole result list per entry, i.e. O(n^2).
  seen_normalized_paths = set()
  for file_path in file_list:
    if not os.path.exists(file_path):
      continue

    if os.path.isdir(file_path):
      continue

    # Do a os specific case normalization before comparison.
    normalized_path = os.path.normcase(file_path)
    if normalized_path in seen_normalized_paths:
      continue

    seen_normalized_paths.add(normalized_path)
    filtered_file_list.append(file_path)

  if len(filtered_file_list) != len(file_list):
    logs.log('Filtered file list (%s) from (%s).' % (str(filtered_file_list),
                                                     str(file_list)))
  return filtered_file_list
def find_binary_path(app_directory, binary_file_subpath):
  """Find the path to a binary given the app directory and the file name.

  This is necessary as cov files are created in the root app directory, and we
  need a way to find the corresponding binary to symbolize addresses.

  Lookup order: exact path under |app_directory|; longest sub-path suffix
  match; finally a bare filename match. Returns None when nothing matches.
  NOTE(review): for the walk-based fallbacks, which match wins depends on
  os.walk traversal order.
  """
  binary_path = os.path.join(app_directory, binary_file_subpath)
  if os.path.exists(binary_path):
    # Common case: the binary exists in the root directory.
    return binary_path

  # Match the longest file sub-path suffix.
  binary_file_subpath_with_sep = binary_file_subpath
  if not binary_file_subpath_with_sep.startswith(os.sep):
    # Anchor with a separator so 'lib/foo' cannot match 'otherlib/foo'.
    binary_file_subpath_with_sep = os.sep + binary_file_subpath_with_sep
  for root, _, filenames in os.walk(app_directory):
    for filename in filenames:
      file_path = os.path.join(root, filename)
      if file_path.endswith(binary_file_subpath_with_sep):
        return file_path

  # Otherwise, do a search for the filename.
  binary_filename = os.path.basename(binary_file_subpath)
  for root, _, filenames in os.walk(app_directory):
    for filename in filenames:
      if filename == binary_filename:
        file_path = os.path.join(root, filename)
        return file_path

  return None
def get_application_id():
  """Return the App Engine application id, without any partition prefix.

  Code simplified based off original implementation in AppEngine SDK
  get_identity.get_application_id. Returns None when APPLICATION_ID is unset.
  """
  app_id = environment.get_value('APPLICATION_ID')
  if app_id is None:
    return None

  # Ids may look like 'partition~app-id'; keep only the part after '~'.
  separator_index = app_id.find('~')
  if separator_index > 0:
    return app_id[separator_index + 1:]
  return app_id
def service_account_email():
  """Return the default App Engine service account email for this app."""
  # TODO(ochang): Detect GCE and return the GCE service account instead.
  email_id = get_application_id()
  if ':' in email_id:
    # 'domain:app' ids map to 'app.domain@appspot.gserviceaccount.com'.
    domain, application_id = email_id.split(':')
    email_id = '{}.{}'.format(application_id, domain)

  return '{}@appspot.gserviceaccount.com'.format(email_id)
def get_bot_testcases_file_path(input_directory):
  """Returns path to bot-specific fuzzed testcases."""
  # Using |FUZZ_INPUTS| prevents putting high load on nfs servers for cases
  # when |input_directory| is a cloud storage data bundle. We can't rely
  # on |FUZZ_INPUTS| always since it might not be available during local fuzzer
  # testing, so use |input_directory| if it is not defined.
  base_directory = environment.get_value('FUZZ_INPUTS') or input_directory
  filename = '.%s_testcases' % environment.get_value('BOT_NAME')
  return os.path.join(base_directory, filename)
def get_crash_stacktrace_output(application_command_line,
                                symbolized_stacktrace,
                                unsymbolized_stacktrace=None,
                                build_type=None):
  """Return output string with symbolized and unsymbolized stacktraces
  combined.

  The output starts with the environment settings, then the command line,
  the symbolized stacktrace, and (when it adds information) the lines of the
  unsymbolized stacktrace that do not appear in the symbolized one.
  """

  def _guess_build_type(application_command_line):
    # Infer the build flavor from markers in the binary path/arguments.
    if 'stable' in application_command_line:
      return 'stable'
    if 'beta' in application_command_line:
      return 'beta'
    if sub_string_exists_in(['debug', 'dbg'], application_command_line):
      return 'debug'
    return 'release'

  separator = '-' * 40
  if not build_type:
    build_type = _guess_build_type(application_command_line)

  crash_stacktraces_output = environment.get_environment_settings_as_string()
  if application_command_line:
    crash_stacktraces_output += (
        '[Command line] %s\n\n' % application_command_line)
  crash_stacktraces_output += ('+%s%s Build Stacktrace%s+\n%s' % (
      separator, build_type.capitalize(), separator, symbolized_stacktrace))

  # No unsymbolized stack available. Bail out.
  if not unsymbolized_stacktrace:
    return crash_stacktraces_output

  unsymbolized_stacktrace_diff = get_unique_lines_in_unsymbolized_stack(
      symbolized_stacktrace, unsymbolized_stacktrace)
  if unsymbolized_stacktrace_diff:
    crash_stacktraces_output += (
        '\n\n+%s%s Build Unsymbolized Stacktrace (diff)%s+\n\n%s' %
        (separator, build_type.capitalize(), separator,
         unsymbolized_stacktrace_diff))
  return crash_stacktraces_output
def get_directory_hash_for_path(file_path):
  """Return the hash of |file_path|'s directory (excludes the file name).

  The directory is taken relative to ROOT_DIR and slash-normalized so the
  hash is stable across platforms.
  """
  root_directory = environment.get_value('ROOT_DIR')
  relative_directory = remove_prefix(
      os.path.dirname(file_path), root_directory + os.sep)
  return string_hash(relative_directory.replace('\\', '/'))
def get_file_contents_with_fatal_error_on_failure(path):
  """Return the bytes of |path|; log and raise BadStateError on read failure."""
  try:
    with open(path, 'rb') as file_handle:
      return file_handle.read()
  except IOError:
    logs.log_error('Unable to read file `%s\'' % path)
    raise errors.BadStateError
def get_line_seperator(label=''):
  """Return a blank-line padded 40-dash separator with |label| embedded."""
  dashes = '-' * 40
  return '\n\n{0}{1}{0}\n\n'.format(dashes, label)
def get_normalized_relative_path(file_path, directory_path):
  """Return |file_path| relative to |directory_path|, with forward slashes."""
  relative_path = remove_prefix(file_path, directory_path + os.sep)
  return relative_path.replace('\\', '/')
def get_path_without_ext(path):
  """Return |path| with its final extension removed."""
  root, _ = os.path.splitext(path)
  return root
def get_process_ids(process_id, recursive=True):
  """Return list of pids for a process and its descendants.

  Returns [] when the process does not exist or its children cannot be
  enumerated. NOTE(review): psutil is imported in a try/except at module
  level and may be None; this function assumes it is available.
  """
  # Try to find the running process.
  if not psutil.pid_exists(process_id):
    return []

  pids = [process_id]

  try:
    psutil_handle = psutil.Process(process_id)
    children = psutil_handle.children(recursive=recursive)
    for child in children:
      pids.append(child.pid)
  except psutil.NoSuchProcess:
    # Avoid too much logging when the process already died.
    return []

  except (psutil.AccessDenied, OSError):
    logs.log_warn('Failed to get process children.')
    return []

  return pids
def get_line_count_string(line_count):
  """Return a human-readable description of |line_count|."""
  if line_count == 0:
    return 'empty'
  return '1 line' if line_count == 1 else '%d lines' % line_count
def get_size_string(size):
  """Return |size| (bytes) as a whole-unit human-readable string."""
  # Largest unit first; whole-unit integer division (no rounding).
  for shift, unit in ((30, 'GB'), (20, 'MB'), (10, 'KB')):
    if size >= 1 << shift:
      return '%d %s' % (size >> shift, unit)
  return '%d B' % size
def get_unique_lines_in_unsymbolized_stack(symbolized_stacktrace,
                                           unsymbolized_stacktrace):
  """Return unique lines in unsymbolized stacktrace that are not in the
  symbolized stacktrace.

  Produces one contiguous window spanning from the first unique line to just
  past the last one, expanded by two context lines on either side. Returns ''
  when nothing is unique.
  """
  if symbolized_stacktrace == unsymbolized_stacktrace:
    return ''

  symbolized_stacktrace_lines = symbolized_stacktrace.splitlines()
  unsymbolized_stacktrace_lines = unsymbolized_stacktrace.splitlines()

  # Compare whitespace-insensitively.
  stripped_symbolized_stacktrace_lines = set()
  for line in symbolized_stacktrace_lines:
    stripped_symbolized_stacktrace_lines.add(line.strip())

  index = 0
  last_index = len(unsymbolized_stacktrace_lines) - 1
  start = -1  # first unique line (set once)
  end = -1  # one past the most recent unique line
  while index <= last_index:
    if (unsymbolized_stacktrace_lines[index].strip() not in
        stripped_symbolized_stacktrace_lines):
      if start == -1:
        start = index
        end = index + 1
      else:
        # NOTE(review): sets end to |index| (not index + 1) for later unique
        # lines; the final window is [start, end) plus the gap below.
        end = index
    index += 1

  if start == -1:
    # Nothing unique found, return empty string.
    return ''

  # Pad the window with two lines of surrounding context, clamped to bounds.
  line_gap = 2
  start = max(0, start - line_gap)
  end = min(end + line_gap, last_index + 1)

  result = '\n'.join(unsymbolized_stacktrace_lines[start:end])
  return result
def indent_string(string, chars):
  """Indent every line of |string| by |chars| spaces (no trailing newline)."""
  padding = ' ' * chars
  return '\n'.join(padding + line for line in string.splitlines())
def is_binary_file(file_path, bytes_to_read=1024):
  """Return true if the file looks like a binary file.

  Known binary/text extensions short-circuit; otherwise the first
  |bytes_to_read| bytes are inspected and the file is considered binary when
  more than 10% of them fall outside printable ASCII + common whitespace.
  Returns None when the file cannot be read.
  """
  file_extension = os.path.splitext(file_path)[1].lower()
  if file_extension in BINARY_EXTENSIONS:
    return True
  if file_extension in TEXT_EXTENSIONS:
    return False

  # Compare against byte *values*: iterating bytes in Python 3 yields ints,
  # so the previous list of 1-char strings never matched and every non-empty
  # file was classified as binary.
  text_characters = set(range(32, 128)) | {ord('\r'), ord('\n'), ord('\t')}
  try:
    with open(file_path, 'rb') as file_handle:
      data = file_handle.read(bytes_to_read)
  except OSError:
    logs.log_error('Could not read file %s in is_binary_file.' % file_path)
    return None

  binary_data = [char for char in data if char not in text_characters]
  return len(binary_data) > len(data) * 0.1
def is_recursive_call():
  """Return True if the calling function appears again further up the stack."""
  try:
    stack_frames = inspect.stack()
    caller_name = stack_frames[1][3]
    for stack_frame_index in range(2, len(stack_frames)):
      if caller_name == stack_frames[stack_frame_index][3]:
        return True
  # Narrowed from a bare except (which also swallowed KeyboardInterrupt);
  # stack inspection can still fail, so stay best-effort.
  except Exception:
    pass

  return False
def is_valid_testcase_file(file_path,
                           check_if_exists=True,
                           size_limit=None,
                           allowed_extensions=None):
  """Return true if the file looks like a testcase file."""
  filename = os.path.basename(file_path)
  # Hidden files and fuzzer-generated artifacts are never testcases.
  if filename.startswith('.') or filename.startswith(FUZZ_PREFIX):
    return False

  if allowed_extensions:
    extension = os.path.splitext(file_path)[1].lower()
    if extension not in allowed_extensions:
      return False

  # Reject anything that lives inside a VCS metadata directory.
  for vcs_directory in ('.git', '.hg', '.svn'):
    if '%s%s%s' % (os.sep, vcs_directory, os.sep) in file_path:
      return False

  if (check_if_exists or size_limit) and not os.path.exists(file_path):
    return False

  if size_limit and os.path.getsize(file_path) > size_limit:
    return False

  return True
def maximum_parallel_processes_allowed():
  """Return the maximum parallel process count, scaled by THREAD_MULTIPLIER."""
  if environment.is_trusted_host():
    # gRPC only supports 1 thread/process.
    return 1

  base_count = environment.get_value('MAX_FUZZ_THREADS', 1)
  multiplier = environment.get_value('THREAD_MULTIPLIER', 1)
  return int(base_count * multiplier)
def normalize_path(path):
  """Normalize |path|, folding case where the OS treats paths
  case-insensitively (i.e. Windows)."""
  normalized = os.path.normpath(path)
  return os.path.normcase(normalized)
def python_gc():
  """Run Python's garbage collector several times."""
  # gc.collect() isn't perfectly synchronous: it may break reference cycles
  # that then take time to fully finalize. Call it thrice and hope for the
  # best.
  for _ in range(3):
    gc.collect()
def random_element_from_list(element_list):
  """Return a uniformly random element of |element_list| (crypto-grade RNG)."""
  index = random.SystemRandom().randint(0, len(element_list) - 1)
  return element_list[index]
def random_number(start, end):
  """Return a random integer in [start, end], inclusive (crypto-grade RNG)."""
  rng = random.SystemRandom()
  return rng.randint(start, end)
# pylint: disable=inconsistent-return-statements
def random_weighted_choice(element_list, weight_attribute='weight'):
  """Return a random element, with probability proportional to its weight."""
  total_weight = sum(getattr(e, weight_attribute) for e in element_list)
  threshold = random.SystemRandom().uniform(0, total_weight)
  cumulative_weight = 0
  for candidate in element_list:
    candidate_weight = getattr(candidate, weight_attribute)
    if candidate_weight == 0:
      # Zero-weight elements are never picked.
      continue
    cumulative_weight += candidate_weight
    if cumulative_weight >= threshold:
      return candidate

  assert False, 'Failed to make a random weighted choice.'
def read_data_from_file(file_path, eval_data=True, default=None):
  """Returns eval-ed data from file.

  Reads |file_path| with retries (FAIL_RETRIES attempts, randomized waits up
  to FAIL_WAIT). With |eval_data| the UTF-8 content is parsed via
  ast.literal_eval. Returns |default| when the file is missing or empty,
  None when reading or parsing ultimately fails.
  """
  if not os.path.exists(file_path):
    return default

  failure_wait_interval = environment.get_value('FAIL_WAIT')
  file_content = None
  retry_limit = environment.get_value('FAIL_RETRIES')
  for _ in range(retry_limit):
    try:
      with open(file_path, 'rb') as file_handle:
        file_content = file_handle.read()
    except:
      file_content = None
      logs.log_warn('Error occurred while reading %s, retrying.' % file_path)
      time.sleep(random.uniform(1, failure_wait_interval))
      continue

  if file_content is None:
    logs.log_error('Failed to read data from file %s.' % file_path)
    return None

  if not eval_data:
    return file_content

  if not file_content:
    return default

  try:
    # literal_eval only accepts Python literals - safe for untrusted content.
    return ast.literal_eval(file_content.decode('utf-8'))
  except (SyntaxError, TypeError):
    return None
def remove_prefix(string, prefix):
  """Return |string| with a leading |prefix| removed, if present."""
  return string[len(prefix):] if string.startswith(prefix) else string
def remove_sub_strings(string, substrings):
  """Return |string| with every occurrence of each substring removed."""
  return functools.reduce(
      lambda accumulated, fragment: accumulated.replace(fragment, ''),
      substrings, string)
def restart_machine():
  """Restart machine (forcibly and immediately; requires privileges)."""
  if environment.platform() == 'WINDOWS':
    command = 'shutdown /f /r /t 0'
  else:
    # POSIX platforms.
    command = 'sudo shutdown -r now'
  os.system(command)
def search_bytes_in_file(search_bytes, file_handle):
  """Return True if |search_bytes| occurs in |file_handle|, scanning
  line-by-line to avoid loading the whole file into memory.
  """
  # TODO(aarya): This is too brittle and will fail if we have a very large
  # line.
  return any(search_bytes in line for line in file_handle)
def string_hash(obj):
  """Return the SHA-1 hex digest of str(obj). Not used for security purposes."""
  digest = hashlib.sha1(str(obj).encode('utf-8'))
  return digest.hexdigest()
def entity_hash(obj):
  """Return a deterministic SHA-1 over an ndb entity's sorted field values.

  If an entity has been recently modified, put() must be called on it before
  this function will pick up the changes.
  """
  hasher = hashlib.sha1()
  entity_dict = obj.to_dict()
  # Sorting the keys makes the digest independent of dict ordering.
  for key in sorted(entity_dict):
    hasher.update(str(entity_dict[key]).encode('utf-8'))
  return hasher.hexdigest()
def string_is_true(value):
  """Check whether a string value should be treated as True.

  Falsy values pass through unchanged; 'false', 'False' and '0' are False.
  """
  return value and value not in ('false', 'False', '0')
def strip_from_left(string, prefix):
  """Return |string| with |prefix| removed from its start, if present."""
  if string.startswith(prefix):
    return string[len(prefix):]
  return string
def strip_from_right(string, suffix):
  """Return |string| with |suffix| removed from its end, if present."""
  if suffix and string.endswith(suffix):
    return string[:-len(suffix)]
  return string
def sub_string_exists_in(substring_list, string):
  """Return True if any substring in |substring_list| occurs in |string|."""
  return any(fragment in string for fragment in substring_list)
def time_difference_string(timestamp):
  """Return a human-readable 'how long ago' string for a naive UTC timestamp."""
  if not timestamp:
    return ''

  delta_seconds = int(
      (datetime.datetime.utcnow() - timestamp).total_seconds())
  delta_minutes = delta_seconds // 60
  delta_hours = delta_minutes // 60
  delta_days = delta_hours // 24

  if delta_days > 6:
    # Over a week old: just show the date part.
    return '%s' % str(timestamp).split()[0]
  if delta_days > 1:
    return '%s days ago' % delta_days  # starts at 2 days.
  if delta_hours > 1:
    return '%s hours ago' % delta_hours  # starts at 2 hours.
  if delta_minutes > 1:
    return '%s minutes ago' % delta_minutes
  if delta_minutes > 0:
    return '1 minute ago'
  if delta_seconds > -30:
    return 'moments ago'

  # Only say something is in the future if it is more than just clock skew.
  return 'in the future'
def timeout(duration):
  """Timeout decorator for functions.

  Runs the wrapped function in a shared ThreadPool and waits up to
  |duration| seconds for its result. On timeout the whole process exits
  fatally (see below) instead of leaking piled-up worker threads.
  """

  def decorator(func):
    """Decorates the given function."""
    if environment.is_running_on_app_engine():
      # multiprocessing doesn't work on App Engine.
      return func

    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
      """Wrapper."""
      # FIXME: Weird exceptions in imports, might be something relating to our
      # reload module. Needs further investigation, try this as a temporary fix.
      import multiprocessing.pool
      import threading

      # Fix for Python < 2.7.2.
      if not hasattr(threading.current_thread(), '_children'):
        # pylint: disable=protected-access
        threading.current_thread()._children = weakref.WeakKeyDictionary()

      global THREAD_POOL
      if THREAD_POOL is None:
        # Lazily create one shared pool for all @timeout-wrapped functions.
        THREAD_POOL = multiprocessing.pool.ThreadPool(processes=3)

      try:
        from clusterfuzz._internal.datastore import \
            ndb_init  # Avoid circular import.

        async_result = THREAD_POOL.apply_async(
            ndb_init.thread_wrapper(func), args=args, kwds=kwargs)
        return async_result.get(timeout=duration)
      except multiprocessing.TimeoutError:
        # Sleep for some minutes in order to wait for flushing metrics.
        time.sleep(120)

        # If we don't exit here, we will cause threads to pile up and leading to
        # out-of-memory. Safe to just exit here.
        logs.log_fatal_and_exit(
            ('Exception occurred in function {0}: args: {1}, kwargs: {2}'
             ' exception: {3}').format(func, args, kwargs,
                                       sys.exc_info()[1]))

    return _wrapper

  return decorator
def wait_until_timeout(threads, thread_timeout):
  """Wait for all threads to finish unless the given timeout is reached.

  If no thread is alive, it waits much shorter than the given timeout.
  Return True if timeout is exceeded, and return False otherwise.
  """
  thread_alive_check_interval = environment.get_value(
      'THREAD_ALIVE_CHECK_INTERVAL')
  if not thread_alive_check_interval:
    # No polling configured: just sleep the full timeout and report no-timeout.
    time.sleep(thread_timeout)
    return False

  wait_timeout = time.time() + thread_timeout
  while time.time() < wait_timeout:
    time.sleep(thread_alive_check_interval)

    # Poll: finish early as soon as every thread has exited.
    thread_alive = False
    for thread in threads:
      if thread.is_alive():
        thread_alive = True
        break

    if not thread_alive:
      return False

  return True
def write_data_to_file(content, file_path, append=False):
  """Writes data to file.

  |content| should be bytes-like; other types are converted via
  str(...).encode('utf-8') on a first failed attempt. Retries transient
  filesystem errors with randomized waits; logs an error if all attempts
  fail.
  """
  failure_wait_interval = environment.get_value('FAIL_WAIT')
  file_mode = 'ab' if append else 'wb'
  retry_limit = environment.get_value('FAIL_RETRIES')

  # TODO(mbarbella): One extra iteration is allowed for the type conversion hack
  # included here. Once this function is converted to only accept bytes-like
  # objects, it should be adjusted back to the normal retry limit.
  for _ in range(retry_limit + 1):
    try:
      with open(file_path, file_mode) as file_handle:
        file_handle.write(content)
    except TypeError:
      # If we saw a TypeError, content was not bytes-like. Convert it.
      content = str(content).encode('utf-8')
      continue
    except EnvironmentError:
      # An EnvironmentError signals a problem writing the file. Retry in case
      # it was a spurious error.
      logs.log_warn('Error occurred while writing %s, retrying.' % file_path)
      time.sleep(random.uniform(1, failure_wait_interval))
      continue

    # Successfully written data file.
    return

  logs.log_error('Failed to write data to file %s.' % file_path)
@memoize.wrap(memoize.FifoInMemory(1))
def default_backup_bucket():
  """Return the default backup bucket for this instance of ClusterFuzz.

  Memoized: the project config is read at most once per process.
  """
  # Do not use |BACKUP_BUCKET| environment variable as that is the overridden
  # backup bucket from job type and is not the default backup bucket.
  return local_config.ProjectConfig().get('env.BACKUP_BUCKET')
@memoize.wrap(memoize.FifoInMemory(1))
def default_project_name():
  """Return the default project name for this instance of ClusterFuzz.

  Memoized: the project config is read at most once per process.
  """
  # Do not use |PROJECT_NAME| environment variable as that is the overridden
  # project name from job type and is not the default project name.
  return local_config.ProjectConfig().get('env.PROJECT_NAME')
def current_project():
  """Return the project for the current job, or the default project."""
  fallback = default_project_name()
  return environment.get_value('PROJECT_NAME', fallback)
def current_source_version():
  """Return the current source revision, or None if no manifest is present."""
  # For test use.
  source_version_override = environment.get_value('SOURCE_VERSION_OVERRIDE')
  if source_version_override:
    return source_version_override

  local_manifest_path = os.path.join(
      environment.get_value('ROOT_DIR'), LOCAL_SOURCE_MANIFEST)
  if not os.path.exists(local_manifest_path):
    return None

  return read_data_from_file(
      local_manifest_path, eval_data=False).strip().decode('utf-8')
def read_from_handle_truncated(file_handle, max_len):
  """Read from |file_handle|, eliding the middle so output fits |max_len|."""
  file_handle.seek(0, os.SEEK_END)
  file_size = file_handle.tell()
  file_handle.seek(0, os.SEEK_SET)
  if file_size <= max_len:
    return file_handle.read()

  # Keep the first and last |max_len| / 2 bytes and mark the elided middle.
  half_max_len = max_len // 2
  head = file_handle.read(half_max_len)
  file_handle.seek(file_size - half_max_len, os.SEEK_SET)
  tail = file_handle.read(half_max_len)
  marker = b'\n...truncated %d bytes...\n' % (file_size - max_len)
  return head + marker + tail
def normalize_email(email):
  """Return |email| normalized for comparison (lower-cased)."""
  # TODO(ochang): Investigate whether if it makes sense to replace
  # @googlemail.com with @gmail.com.
  return email.lower()
def emails_equal(first, second):
  """Return whether the two emails are equal once normalized."""
  if not (first and second):
    # Missing/empty addresses never compare equal.
    return False
  return normalize_email(first) == normalize_email(second)
def parse_delimited(value_or_handle, delimiter, strip=False,
                    remove_empty=False):
  """Split a delimited value (string or file-like object) into fields."""
  if hasattr(value_or_handle, 'read'):
    raw_value = value_or_handle.read()
  else:
    raw_value = value_or_handle

  tokens = raw_value.split(delimiter)
  if not strip and not remove_empty:
    return tokens

  if strip:
    tokens = [token.strip() for token in tokens]
  if remove_empty:
    tokens = [token for token in tokens if token]
  return tokens
def is_oss_fuzz():
  """Return whether this is an OSS-Fuzz instance."""
  return 'oss-fuzz' == default_project_name()
def is_chromium():
  """Return whether this is an instance of chromium fuzzing."""
  return 'chromium' == default_project_name()
def file_hash(file_path):
  """Returns the SHA-1 hash of |file_path| contents."""
  digest = hashlib.sha1()
  with open(file_path, 'rb') as file_handle:
    # Read in 50 KB chunks to bound memory use on large files.
    for chunk in iter(lambda: file_handle.read(51200), b''):
      digest.update(chunk)
  return digest.hexdigest()
def cpu_count():
  """Get the CPU count (honoring the CPU_COUNT_OVERRIDE setting)."""
  # Deferred import: multiprocessing does not import on App Engine.
  import multiprocessing

  return environment.get_value('CPU_COUNT_OVERRIDE',
                               multiprocessing.cpu_count())
| |
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from datetime import datetime
import time
from uuid import uuid1, uuid4
import uuid
from cassandra.cluster import Session
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine.connection import NOT_SET
import mock
from cassandra.cqlengine import functions
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
from cassandra.cqlengine import query
from datetime import timedelta
from datetime import tzinfo
from cassandra.cqlengine import statements
from cassandra.cqlengine import operators
from cassandra.util import uuid_from_time
from cassandra.cqlengine.connection import get_session
from tests.integration import PROTOCOL_VERSION
class TzOffset(tzinfo):
    """Minimal implementation of a timezone offset to help testing with timezone
    aware datetimes.
    """

    def __init__(self, offset):
        """:param offset: fixed offset from UTC, in hours."""
        self._offset = timedelta(hours=offset)

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        # BUG FIX: datetime.timedelta has no `.hours` attribute, so the
        # original `self._offset.hours` raised AttributeError whenever
        # tzname() was called (e.g. by str()/repr() of an aware datetime).
        # Derive the hour offset from total_seconds() instead.
        return 'TzOffset: {}'.format(self._offset.total_seconds() / 3600)

    def dst(self, dt):
        # This test helper models a fixed offset with no DST transitions.
        return timedelta(0)
class TestModel(Model):
    # Composite primary key: (test_id, attempt_id); the remaining columns
    # are plain (non-indexed) values used by the queryset tests.
    test_id = columns.Integer(primary_key=True)
    attempt_id = columns.Integer(primary_key=True)
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer()
class IndexedTestModel(Model):
    # Same shape as TestModel but with secondary indexes on attempt_id and
    # test_result, used to test querying on indexed (non-key) columns.
    test_id = columns.Integer(primary_key=True)
    attempt_id = columns.Integer(index=True)
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer(index=True)
class TestMultiClusteringModel(Model):
    # Partition key `one` with two clustering columns (`two`, `three`),
    # used to test ordering across multiple clustering columns.
    one = columns.Integer(primary_key=True)
    two = columns.Integer(primary_key=True)
    three = columns.Integer(primary_key=True)
class TestQuerySetOperation(BaseCassEngTestCase):
    """Unit tests for queryset construction: filter parsing, immutability
    and argument validation. These inspect the queryset's internal _where
    clauses and do not require database round trips."""

    def test_query_filter_parsing(self):
        """
        Tests the queryset filter method parses its kwargs properly
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1

        op = query1._where[0]
        assert isinstance(op, statements.WhereClause)
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5

        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2

        op = query2._where[1]
        self.assertIsInstance(op, statements.WhereClause)
        self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1

    def test_query_expression_parsing(self):
        """ Tests that query expressions are evaluated properly """
        query1 = TestModel.filter(TestModel.test_id == 5)
        assert len(query1._where) == 1

        op = query1._where[0]
        assert isinstance(op, statements.WhereClause)
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5

        query2 = query1.filter(TestModel.expected_result >= 1)
        assert len(query2._where) == 2

        op = query2._where[1]
        self.assertIsInstance(op, statements.WhereClause)
        self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1

    def test_using_invalid_column_names_in_filter_kwargs_raises_error(self):
        """
        Tests that using invalid or nonexistent column names for filter args raises an error
        """
        with self.assertRaises(query.QueryException):
            TestModel.objects(nonsense=5)

    def test_using_nonexistant_column_names_in_query_args_raises_error(self):
        """
        Tests that using invalid or nonexistent columns for query args raises an error
        """
        with self.assertRaises(AttributeError):
            TestModel.objects(TestModel.nonsense == 5)

    def test_using_non_query_operators_in_query_args_raises_error(self):
        """
        Tests that providing query args that are not query operator instances raises an error
        """
        with self.assertRaises(query.QueryException):
            TestModel.objects(5)

    def test_queryset_is_immutable(self):
        """
        Tests that calling a queryset function that changes its state returns a new queryset
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1

        # filter() must return a copy; query1 must be unchanged.
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2
        assert len(query1._where) == 1

    def test_queryset_limit_immutability(self):
        """
        Tests that calling a queryset function that changes its state returns a new queryset with same limit
        """
        query1 = TestModel.objects(test_id=5).limit(1)
        assert query1._limit == 1

        # The limit is inherited by derived querysets...
        query2 = query1.filter(expected_result__gte=1)
        assert query2._limit == 1

        # ...and overriding it on the copy does not touch the parent.
        query3 = query1.filter(expected_result__gte=1).limit(2)
        assert query1._limit == 1
        assert query3._limit == 2

    def test_the_all_method_duplicates_queryset(self):
        """
        Tests that calling all on a queryset with previously defined filters duplicates queryset
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1

        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2

        query3 = query2.all()
        assert query3 == query2

    def test_defining_only_and_defer_fails(self):
        """
        Tests that trying to add fields to either only or defer, or doing so more than once fails
        """
        # NOTE(review): placeholder — the test body was never implemented.

    def test_defining_only_or_defer_on_nonexistant_fields_fails(self):
        """
        Tests that setting only or defer fields that don't exist raises an exception
        """
        # NOTE(review): placeholder — the test body was never implemented.
class BaseQuerySetUsage(BaseCassEngTestCase):
    """Shared fixture base: recreates the test tables once per class and
    populates TestModel and IndexedTestModel with a fixed set of 12 rows
    each, which the queryset test classes below assert against."""

    @classmethod
    def setUpClass(cls):
        super(BaseQuerySetUsage, cls).setUpClass()
        # Drop first so repeated runs start from a clean schema.
        drop_table(TestModel)
        drop_table(IndexedTestModel)

        sync_table(TestModel)
        sync_table(IndexedTestModel)
        sync_table(TestMultiClusteringModel)

        TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=2, description='try3', expected_result=15, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=3, description='try4', expected_result=20, test_result=25)

        TestModel.objects.create(test_id=1, attempt_id=0, description='try5', expected_result=5, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=1, description='try6', expected_result=10, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=2, description='try7', expected_result=15, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=3, description='try8', expected_result=20, test_result=20)

        TestModel.objects.create(test_id=2, attempt_id=0, description='try9', expected_result=50, test_result=40)
        TestModel.objects.create(test_id=2, attempt_id=1, description='try10', expected_result=60, test_result=40)
        TestModel.objects.create(test_id=2, attempt_id=2, description='try11', expected_result=70, test_result=45)
        TestModel.objects.create(test_id=2, attempt_id=3, description='try12', expected_result=75, test_result=45)

        IndexedTestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
        IndexedTestModel.objects.create(test_id=1, attempt_id=1, description='try2', expected_result=10, test_result=30)
        IndexedTestModel.objects.create(test_id=2, attempt_id=2, description='try3', expected_result=15, test_result=30)
        IndexedTestModel.objects.create(test_id=3, attempt_id=3, description='try4', expected_result=20, test_result=25)

        IndexedTestModel.objects.create(test_id=4, attempt_id=0, description='try5', expected_result=5, test_result=25)
        IndexedTestModel.objects.create(test_id=5, attempt_id=1, description='try6', expected_result=10, test_result=25)
        IndexedTestModel.objects.create(test_id=6, attempt_id=2, description='try7', expected_result=15, test_result=25)
        IndexedTestModel.objects.create(test_id=7, attempt_id=3, description='try8', expected_result=20, test_result=20)

        IndexedTestModel.objects.create(test_id=8, attempt_id=0, description='try9', expected_result=50, test_result=40)
        IndexedTestModel.objects.create(test_id=9, attempt_id=1, description='try10', expected_result=60,
                                        test_result=40)
        IndexedTestModel.objects.create(test_id=10, attempt_id=2, description='try11', expected_result=70,
                                        test_result=45)
        IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75,
                                        test_result=45)

    @classmethod
    def tearDownClass(cls):
        super(BaseQuerySetUsage, cls).tearDownClass()
        drop_table(TestModel)
        drop_table(IndexedTestModel)
        drop_table(TestMultiClusteringModel)
class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage):
    """Integration tests for count(), iteration and get() against the
    fixture rows created in BaseQuerySetUsage.setUpClass."""

    def test_count(self):
        """ Tests that adding filtering statements affects the count query as expected """
        assert TestModel.objects.count() == 12

        q = TestModel.objects(test_id=0)
        assert q.count() == 4

    def test_query_expression_count(self):
        """ Tests that adding query statements affects the count query as expected """
        assert TestModel.objects.count() == 12

        q = TestModel.objects(TestModel.test_id == 0)
        assert q.count() == 4

    def test_iteration(self):
        """ Tests that iterating over a query set pulls back all of the expected results """
        q = TestModel.objects(test_id=0)
        # tuple of expected attempt_id, expected_result values
        compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
        for t in q:
            val = t.attempt_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0

        # test with regular filtering
        q = TestModel.objects(attempt_id=3).allow_filtering()
        assert len(q) == 3
        # tuple of expected test_id, expected_result values
        compare_set = set([(0, 20), (1, 20), (2, 75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0

        # test with query method
        q = TestModel.objects(TestModel.attempt_id == 3).allow_filtering()
        assert len(q) == 3
        # tuple of expected test_id, expected_result values
        compare_set = set([(0, 20), (1, 20), (2, 75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0

    def test_multiple_iterations_work_properly(self):
        """ Tests that iterating over a query set more than once works """
        # test with both the filtering method and the query method
        for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
            # tuple of expected attempt_id, expected_result values
            compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0

            # try it again
            compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0

    def test_multiple_iterators_are_isolated(self):
        """
        tests that the use of one iterator does not affect the behavior of another
        """
        for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
            q = q.order_by('attempt_id')
            expected_order = [0, 1, 2, 3]
            # Two interleaved iterators must each see the full row sequence.
            iter1 = iter(q)
            iter2 = iter(q)
            for attempt_id in expected_order:
                assert next(iter1).attempt_id == attempt_id
                assert next(iter2).attempt_id == attempt_id

    def test_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = TestModel.objects.get(test_id=0, attempt_id=0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = TestModel.objects(test_id=0, attempt_id=0)
        m = q.get()
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = TestModel.objects(test_id=0)
        m = q.get(attempt_id=0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

    def test_query_expression_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = TestModel.get(TestModel.test_id == 0, TestModel.attempt_id == 0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = TestModel.objects(TestModel.test_id == 0, TestModel.attempt_id == 0)
        m = q.get()
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = TestModel.objects(TestModel.test_id == 0)
        m = q.get(TestModel.attempt_id == 0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

    def test_get_doesnotexist_exception(self):
        """
        Tests that get calls that don't return a result raises a DoesNotExist error
        """
        with self.assertRaises(TestModel.DoesNotExist):
            TestModel.objects.get(test_id=100)

    def test_get_multipleobjects_exception(self):
        """
        Tests that get calls that return multiple results raise a MultipleObjectsReturned error
        """
        with self.assertRaises(TestModel.MultipleObjectsReturned):
            TestModel.objects.get(test_id=1)

    def test_allow_filtering_flag(self):
        """
        """
        # NOTE(review): placeholder — the test body was never implemented.
def test_non_quality_filtering():
    """Non-equality (>) filtering on a clustering key with ALLOW FILTERING
    returns only the matching rows."""
    class NonEqualityFilteringModel(Model):
        # Each row gets a fresh UUID partition key by default.
        example_id = columns.UUID(primary_key=True, default=uuid.uuid4)
        sequence_id = columns.Integer(primary_key=True)  # sequence_id is a clustering key
        example_type = columns.Integer(index=True)
        created_at = columns.DateTime()

    drop_table(NonEqualityFilteringModel)
    sync_table(NonEqualityFilteringModel)

    # setup table, etc.
    NonEqualityFilteringModel.create(sequence_id=1, example_type=0, created_at=datetime.now())
    NonEqualityFilteringModel.create(sequence_id=3, example_type=0, created_at=datetime.now())
    NonEqualityFilteringModel.create(sequence_id=5, example_type=1, created_at=datetime.now())

    # Only sequence_id=5 is strictly greater than 3.
    qA = NonEqualityFilteringModel.objects(NonEqualityFilteringModel.sequence_id > 3).allow_filtering()
    num = qA.count()
    assert num == 1, num
class TestQuerySetOrdering(BaseQuerySetUsage):
    """Tests for order_by() validation and ordering semantics."""

    def test_order_by_success_case(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q, expected_order):
            assert model.attempt_id == expect

        # '-' prefix requests descending order.
        q = q.order_by('-attempt_id')
        expected_order.reverse()
        for model, expect in zip(q, expected_order):
            assert model.attempt_id == expect

    def test_ordering_by_non_second_primary_keys_fail(self):
        # kwarg filtering
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_id=0).order_by('test_id')

        # kwarg filtering
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(TestModel.test_id == 0).order_by('test_id')

    def test_ordering_by_non_primary_keys_fails(self):
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_id=0).order_by('description')

    def test_ordering_on_indexed_columns_fails(self):
        with self.assertRaises(query.QueryException):
            q = IndexedTestModel.objects(test_id=0).order_by('attempt_id')

    def test_ordering_on_multiple_clustering_columns(self):
        TestMultiClusteringModel.create(one=1, two=1, three=4)
        TestMultiClusteringModel.create(one=1, two=1, three=2)
        TestMultiClusteringModel.create(one=1, two=1, three=5)
        TestMultiClusteringModel.create(one=1, two=1, three=1)
        TestMultiClusteringModel.create(one=1, two=1, three=3)

        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('-two', '-three')
        assert [r.three for r in results] == [5, 4, 3, 2, 1]

        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two', 'three')
        assert [r.three for r in results] == [1, 2, 3, 4, 5]

        # Chained order_by calls accumulate ordering clauses.
        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two').order_by('three')
        assert [r.three for r in results] == [1, 2, 3, 4, 5]
class TestQuerySetSlicing(BaseQuerySetUsage):
    """Tests for indexing and slicing evaluated querysets."""

    def test_out_of_range_index_raises_error(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        with self.assertRaises(IndexError):
            q[10]

    def test_array_indexing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for i in range(len(q)):
            assert q[i].attempt_id == expected_order[i]

    def test_negative_indexing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        assert q[-1].attempt_id == expected_order[-1]
        assert q[-2].attempt_id == expected_order[-2]

    def test_slicing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q[1:3], expected_order[1:3]):
            assert model.attempt_id == expect

    def test_negative_slicing(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q[-3:], expected_order[-3:]):
            assert model.attempt_id == expect
        for model, expect in zip(q[:-1], expected_order[:-1]):
            assert model.attempt_id == expect
class TestQuerySetValidation(BaseQuerySetUsage):
    """Tests that invalid query shapes are rejected when executed."""

    def test_primary_key_or_index_must_be_specified(self):
        """
        Tests that queries that don't have an equals relation to a primary key or indexed field fail
        """
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_result=25)
            # The error surfaces on evaluation, not construction.
            list([i for i in q])

    def test_primary_key_or_index_must_have_equal_relation_filter(self):
        """
        Tests that queries that don't have non equal (>,<, etc) relation to a primary key or indexed field fail
        """
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_id__gt=0)
            # The error surfaces on evaluation, not construction.
            list([i for i in q])

    def test_indexed_field_can_be_queried(self):
        """
        Tests that queries on an indexed field will work without any primary key relations specified
        """
        q = IndexedTestModel.objects(test_result=25)
        assert q.count() == 4
class TestQuerySetDelete(BaseQuerySetUsage):
    """Tests for QuerySet.delete(): successful partition deletes and the
    rejection of unsafe (unkeyed / unfiltered) deletes."""

    def test_delete(self):
        # Add a disposable partition (test_id=3) on top of the 12 fixture rows.
        TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40)
        TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40)
        TestModel.objects.create(test_id=3, attempt_id=2, description='try11', expected_result=70, test_result=45)
        TestModel.objects.create(test_id=3, attempt_id=3, description='try12', expected_result=75, test_result=45)

        assert TestModel.objects.count() == 16
        assert TestModel.objects(test_id=3).count() == 4

        TestModel.objects(test_id=3).delete()

        assert TestModel.objects.count() == 12
        assert TestModel.objects(test_id=3).count() == 0

    def test_delete_without_partition_key(self):
        """ Tests that attempting to delete a model without defining a partition key fails """
        with self.assertRaises(query.QueryException):
            TestModel.objects(attempt_id=0).delete()

    def test_delete_without_any_where_args(self):
        """ Tests that attempting to delete a whole table without any arguments will fail """
        # BUG FIX: this previously filtered on attempt_id, making it an exact
        # duplicate of test_delete_without_partition_key and leaving the
        # "no where clauses at all" case (described by the docstring) untested.
        with self.assertRaises(query.QueryException):
            TestModel.objects.delete()
class TimeUUIDQueryModel(Model):
    # Partition key + TimeUUID clustering column, used to test the
    # MinTimeUUID/MaxTimeUUID query functions below.
    partition = columns.UUID(primary_key=True)
    time = columns.TimeUUID(primary_key=True)
    data = columns.Text(required=False)
class TestMinMaxTimeUUIDFunctions(BaseCassEngTestCase):
    """Tests for filtering TimeUUID columns with functions.MinTimeUUID and
    functions.MaxTimeUUID."""

    @classmethod
    def setUpClass(cls):
        super(TestMinMaxTimeUUIDFunctions, cls).setUpClass()
        sync_table(TimeUUIDQueryModel)

    @classmethod
    def tearDownClass(cls):
        super(TestMinMaxTimeUUIDFunctions, cls).tearDownClass()
        drop_table(TimeUUIDQueryModel)

    def test_tzaware_datetime_support(self):
        """Test that using timezone aware datetime instances works with the
        MinTimeUUID/MaxTimeUUID functions.
        """
        pk = uuid4()
        midpoint_utc = datetime.utcnow().replace(tzinfo=TzOffset(0))
        midpoint_helsinki = midpoint_utc.astimezone(TzOffset(3))

        # Assert pre-condition that we have the same logical point in time
        assert midpoint_utc.utctimetuple() == midpoint_helsinki.utctimetuple()
        assert midpoint_utc.timetuple() != midpoint_helsinki.timetuple()

        # Three rows: one minute before, at, and one minute after midpoint.
        TimeUUIDQueryModel.create(
            partition=pk,
            time=uuid_from_time(midpoint_utc - timedelta(minutes=1)),
            data='1')

        TimeUUIDQueryModel.create(
            partition=pk,
            time=uuid_from_time(midpoint_utc),
            data='2')

        TimeUUIDQueryModel.create(
            partition=pk,
            time=uuid_from_time(midpoint_utc + timedelta(minutes=1)),
            data='3')

        # Both timezone representations must select the same rows.
        assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_utc))]

        assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_helsinki))]

        assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_utc))]

        assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_helsinki))]

    def test_success_case(self):
        """ Test that the min and max time uuid functions work as expected """
        pk = uuid4()
        # Sleeps keep the uuid1() timestamps strictly ordered around midpoint.
        TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='1')
        time.sleep(0.2)
        TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='2')
        time.sleep(0.2)
        midpoint = datetime.utcnow()
        time.sleep(0.2)
        TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='3')
        time.sleep(0.2)
        TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='4')
        time.sleep(0.2)

        # test kwarg filtering
        q = TimeUUIDQueryModel.filter(partition=pk, time__lte=functions.MaxTimeUUID(midpoint))
        q = [d for d in q]
        self.assertEqual(len(q), 2, msg="Got: %s" % q)
        datas = [d.data for d in q]
        assert '1' in datas
        assert '2' in datas

        q = TimeUUIDQueryModel.filter(partition=pk, time__gte=functions.MinTimeUUID(midpoint))
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '3' in datas
        assert '4' in datas

        # test query expression filtering
        q = TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint)
        )
        q = [d for d in q]
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '1' in datas
        assert '2' in datas

        q = TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint)
        )
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '3' in datas
        assert '4' in datas
class TestInOperator(BaseQuerySetUsage):
    """Tests for IN-clause filtering via kwargs and query expressions."""

    def test_kwarg_success_case(self):
        """ Tests the in operator works with the kwarg query method """
        # test_id 0 and 1 each hold 4 fixture rows.
        q = TestModel.filter(test_id__in=[0, 1])
        assert q.count() == 8

    def test_query_expression_success_case(self):
        """ Tests the in operator works with the query expression query method """
        q = TestModel.filter(TestModel.test_id.in_([0, 1]))
        assert q.count() == 8
class TestValuesList(BaseQuerySetUsage):
    """Tests for QuerySet.values_list(), including flat=True."""

    def test_values_list(self):
        q = TestModel.objects.filter(test_id=0, attempt_id=1)
        item = q.values_list('test_id', 'attempt_id', 'description', 'expected_result', 'test_result').first()
        assert item == [0, 1, 'try2', 10, 30]

        # flat=True with a single column returns the bare value.
        item = q.values_list('expected_result', flat=True).first()
        assert item == 10
class TestObjectsProperty(BaseQuerySetUsage):
    """Tests that Model.objects yields a fresh queryset on each access."""

    def test_objects_property_returns_fresh_queryset(self):
        assert TestModel.objects._result_cache is None
        len(TestModel.objects)  # evaluate queryset
        # A new access must not reuse the evaluated queryset's cache.
        assert TestModel.objects._result_cache is None
class PageQueryTests(BaseCassEngTestCase):
    """Regression test for paged result handling (issue #225)."""

    def test_paged_result_handling(self):
        if PROTOCOL_VERSION < 2:
            raise unittest.SkipTest("Paging requires native protocol 2+, currently using: {0}".format(PROTOCOL_VERSION))

        # addresses #225
        class PagingTest(Model):
            id = columns.Integer(primary_key=True)
            val = columns.Integer()
        sync_table(PagingTest)

        PagingTest.create(id=1, val=1)
        PagingTest.create(id=2, val=2)

        # Force a fetch size of 1 so the two rows span multiple pages.
        session = get_session()
        with mock.patch.object(session, 'default_fetch_size', 1):
            results = PagingTest.objects()[:]
        assert len(results) == 2
class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage):
    """Verifies that queryset timeout() values are forwarded to
    Session.execute as the `timeout` keyword argument."""

    def test_default_timeout(self):
        with mock.patch.object(Session, 'execute') as mock_execute:
            list(TestModel.objects())

        self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET)

    def test_float_timeout(self):
        with mock.patch.object(Session, 'execute') as mock_execute:
            list(TestModel.objects().timeout(0.5))

        self.assertEqual(mock_execute.call_args[-1]['timeout'], 0.5)

    def test_none_timeout(self):
        # None means "no client-side timeout" and must pass through as-is.
        with mock.patch.object(Session, 'execute') as mock_execute:
            list(TestModel.objects().timeout(None))

        self.assertEqual(mock_execute.call_args[-1]['timeout'], None)
class DMLQueryTimeoutTestCase(BaseQuerySetUsage):
    """Verifies timeout propagation for DML (save) queries and the mutual
    exclusivity of timeout() and batch()."""

    def setUp(self):
        self.model = TestModel(test_id=1, attempt_id=1, description='timeout test')
        super(DMLQueryTimeoutTestCase, self).setUp()

    def test_default_timeout(self):
        with mock.patch.object(Session, 'execute') as mock_execute:
            self.model.save()

        self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET)

    def test_float_timeout(self):
        with mock.patch.object(Session, 'execute') as mock_execute:
            self.model.timeout(0.5).save()

        self.assertEqual(mock_execute.call_args[-1]['timeout'], 0.5)

    def test_none_timeout(self):
        with mock.patch.object(Session, 'execute') as mock_execute:
            self.model.timeout(None).save()

        self.assertEqual(mock_execute.call_args[-1]['timeout'], None)

    def test_timeout_then_batch(self):
        # A per-query timeout cannot be combined with a batch; batches own
        # the execution, so this combination is rejected.
        b = query.BatchQuery()
        m = self.model.timeout(None)
        with self.assertRaises(AssertionError):
            m.batch(b)

    def test_batch_then_timeout(self):
        b = query.BatchQuery()
        m = self.model.batch(b)
        with self.assertRaises(AssertionError):
            m.timeout(0.5)
| |
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
from functools import update_wrapper
from distutils.version import LooseVersion
import functools
import numpy as np
import scipy.sparse as sp
import scipy
import scipy.stats
from scipy.sparse.linalg import lsqr as sparse_lsqr # noqa
from numpy.ma import MaskedArray as _MaskedArray # TODO: remove in 1.0
from .._config import config_context, get_config
from .deprecation import deprecated
try:
from pkg_resources import parse_version # type: ignore
except ImportError:
# setuptools not installed
parse_version = LooseVersion # type: ignore
np_version = parse_version(np.__version__)
sp_version = parse_version(scipy.__version__)
if sp_version >= parse_version('1.4'):
from scipy.sparse.linalg import lobpcg
else:
# Backport of lobpcg functionality from scipy 1.4.0, can be removed
# once support for sp_version < parse_version('1.4') is dropped
# mypy error: Name 'lobpcg' already defined (possibly by an import)
from ..externals._lobpcg import lobpcg # type: ignore # noqa
def _object_dtype_isnan(X):
return X != X
# TODO: replace by copy=False, when only scipy > 1.1 is supported.
def _astype_copy_false(X):
    """Returns the copy=False parameter for
    {ndarray, csr_matrix, csc_matrix}.astype when possible,
    otherwise don't specify
    """
    # scipy sparse .astype() only accepts copy=False from scipy >= 1.1;
    # dense ndarrays always accept it.
    supports_copy_kwarg = sp_version >= parse_version('1.1') or not sp.issparse(X)
    return {'copy': False} if supports_copy_kwarg else {}
def _joblib_parallel_args(**kwargs):
    """Set joblib.Parallel arguments in a compatible way for 0.11 and 0.12+

    For joblib 0.11 this maps both ``prefer`` and ``require`` parameters to
    a specific ``backend``.

    Parameters
    ----------

    prefer : str in {'processes', 'threads'} or None
        Soft hint to choose the default backend if no specific backend
        was selected with the parallel_backend context manager.

    require : 'sharedmem' or None
        Hard condstraint to select the backend. If set to 'sharedmem',
        the selected backend will be single-host and thread-based even
        if the user asked for a non-thread based backend with
        parallel_backend.

    See joblib.Parallel documentation for more details
    """
    import joblib

    # joblib >= 0.12 understands prefer/require natively; pass through.
    if parse_version(joblib.__version__) >= parse_version('0.12'):
        return kwargs

    unknown = set(kwargs.keys()).difference({'prefer', 'require'})
    if unknown:
        raise NotImplementedError('unhandled arguments %s with joblib %s'
                                  % (list(unknown), joblib.__version__))

    translated = {}

    if 'prefer' in kwargs:
        prefer = kwargs['prefer']
        if prefer not in ['threads', 'processes', None]:
            raise ValueError('prefer=%s is not supported' % prefer)
        backend_for_prefer = {'threads': 'threading',
                              'processes': 'multiprocessing',
                              None: None}
        translated['backend'] = backend_for_prefer[prefer]

    if 'require' in kwargs:
        require = kwargs['require']
        if require not in [None, 'sharedmem']:
            raise ValueError('require=%s is not supported' % require)
        # A sharedmem requirement overrides any preference: force threading.
        if require == 'sharedmem':
            translated['backend'] = 'threading'

    return translated
class loguniform(scipy.stats.reciprocal):
    """A class supporting log-uniform random variables.

    Parameters
    ----------
    low : float
        The minimum value
    high : float
        The maximum value

    Methods
    -------
    rvs(self, size=None, random_state=None)
        Generate log-uniform random variables

    The most useful method for Scikit-learn usage is highlighted here.
    For a full list, see
    `scipy.stats.reciprocal
    <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.reciprocal.html>`_.
    This list includes all functions of ``scipy.stats`` continuous
    distributions such as ``pdf``.

    Notes
    -----
    This class generates values between ``low`` and ``high`` or

        low <= loguniform(low, high).rvs() <= high

    The logarithmic probability density function (PDF) is uniform. When
    ``x`` is a uniformly distributed random variable between 0 and 1, ``10**x``
    are random variables that are equally likely to be returned.

    This class is an alias to ``scipy.stats.reciprocal``, which uses the
    reciprocal distribution:
    https://en.wikipedia.org/wiki/Reciprocal_distribution

    Examples
    --------

    >>> from sklearn.utils.fixes import loguniform
    >>> rv = loguniform(1e-3, 1e1)
    >>> rvs = rv.rvs(random_state=42, size=1000)
    >>> rvs.min()  # doctest: +SKIP
    0.0010435856341129003
    >>> rvs.max()  # doctest: +SKIP
    9.97403052786026
    """
@deprecated(
    'MaskedArray is deprecated in version 0.23 and will be removed in version '
    '1.0 (renaming of 0.25). Use numpy.ma.MaskedArray instead.'
)
class MaskedArray(_MaskedArray):
    # Deprecated re-export of numpy.ma.MaskedArray, kept only so the public
    # sklearn.utils.fixes API stays importable until 1.0.
    pass  # TODO: remove in 1.0
def _take_along_axis(arr, indices, axis):
    """Implements a simplified version of np.take_along_axis if numpy
    version < 1.15"""
    if np_version >= parse_version('1.15'):
        return np.take_along_axis(arr=arr, indices=indices, axis=axis)
    else:
        if axis is None:
            # Mirror np.take_along_axis semantics: operate on the
            # flattened array, which is then indexed along axis 0.
            arr = arr.flatten()
            # BUG FIX: axis previously remained None here, so the
            # range(axis) call below raised TypeError for axis=None.
            axis = 0

        if not np.issubdtype(indices.dtype, np.intp):
            raise IndexError('`indices` must be an integer array')
        if arr.ndim != indices.ndim:
            raise ValueError(
                "`indices` and `arr` must have the same number of dimensions")

        shape_ones = (1,) * indices.ndim
        # `None` marks the position where `indices` itself is inserted.
        dest_dims = (
            list(range(axis)) +
            [None] +
            list(range(axis+1, indices.ndim))
        )

        # build a fancy index, consisting of orthogonal aranges, with the
        # requested index inserted at the right location
        fancy_index = []
        for dim, n in zip(dest_dims, arr.shape):
            if dim is None:
                fancy_index.append(indices)
            else:
                ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
                fancy_index.append(np.arange(n).reshape(ind_shape))

        fancy_index = tuple(fancy_index)
        return arr[fancy_index]
# remove when https://github.com/joblib/joblib/issues/1071 is fixed
def delayed(function):
    """Decorator used to capture the arguments of a function."""
    # Return the joblib-style (callable, args, kwargs) triple, with the
    # callable wrapped so it carries the current configuration along.
    @functools.wraps(function)
    def capture(*args, **kwargs):
        return _FuncWrapper(function), args, kwargs

    return capture
class _FuncWrapper:
    """Load the global configuration before calling the function."""

    def __init__(self, function):
        self.function = function
        # Snapshot the configuration at wrap time so the call later runs
        # under the caller's configuration, not the worker's.
        self.config = get_config()
        update_wrapper(self, self.function)

    def __call__(self, *args, **kwargs):
        with config_context(**self.config):
            return self.function(*args, **kwargs)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
             axis=0):
    """Implements a simplified linspace function as of numpy version >= 1.16.

    As of numpy 1.16, the arguments start and stop can be array-like and
    there is an optional argument `axis`.
    For simplicity, we only allow 1d array-like to be passed to start and stop.
    See: https://github.com/numpy/numpy/pull/12388 and numpy 1.16 release
    notes about start and stop arrays for linspace logspace and geomspace.

    Returns
    -------
    out : ndarray of shape (num, n_start) or (num,)
        The output array with `n_start=start.shape[0]` columns.
    """
    if np_version < parse_version('1.16'):
        # Promote to float arrays so scalar and array-like inputs behave alike.
        start = np.asanyarray(start) * 1.0
        stop = np.asanyarray(stop) * 1.0
        dt = np.result_type(start, stop, float(num))
        if dtype is None:
            dtype = dt

        if start.ndim == 0 == stop.ndim:
            return np.linspace(start=start, stop=stop, num=num,
                               endpoint=endpoint, retstep=retstep, dtype=dtype)

        if start.ndim != 1 or stop.ndim != 1 or start.shape != stop.shape:
            raise ValueError("start and stop must be 1d array-like of same"
                             " shape.")
        n_start = start.shape[0]
        out = np.empty((num, n_start), dtype=dtype)
        # BUG FIX: np.float was a deprecated alias of the builtin float and
        # is removed in numpy >= 1.20; use the builtin directly.
        step = np.empty(n_start, dtype=float)
        # Compute one column per (start, stop) pair.
        for i in range(n_start):
            out[:, i], step[i] = np.linspace(start=start[i], stop=stop[i],
                                             num=num, endpoint=endpoint,
                                             retstep=True, dtype=dtype)
        if axis != 0:
            out = np.moveaxis(out, 0, axis)

        if retstep:
            return out, step
        else:
            return out
    else:
        return np.linspace(start=start, stop=stop, num=num, endpoint=endpoint,
                           retstep=retstep, dtype=dtype, axis=axis)
| |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from builtins import str
from contextlib import contextmanager
from textwrap import dedent
from pants.backend.python.subsystems.python_repos import PythonRepos
from pants.backend.python.subsystems.python_setup import PythonSetup
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.util.contextutil import environment_as
from pants.util.dirutil import safe_mkdtemp, safe_rmtree
from pants.util.process_handler import subprocess
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
from parameterized import parameterized
from pex.interpreter import PythonInterpreter
from wheel.install import WheelFile
from pants.contrib.python.checks.tasks.checkstyle.checkstyle import Checkstyle
# Parameterized resolve strategies: make the checker importable straight from
# sys.path (True) or obtain it via a real pants resolve (False).
CHECKER_RESOLVE_METHOD = [('sys.path', True), ('resolve', False)]
class CheckstyleTest(PythonTaskTestBase):
  """Integration tests for the python Checkstyle lint task.

  The checker distribution is built once per test class (as a universal wheel)
  and made importable either directly via sys.path or through a pants resolve,
  as selected by the parameterized CHECKER_RESOLVE_METHOD entries.
  """

  # Interpreter constraints used to build targets that should be linted under a
  # specific Python major version.
  py2_constraint = 'CPython>=2.7,<3'
  py3_constraint = 'CPython>=3.4,<3.6'
  @staticmethod
  def build_checker_wheel(root_dir):
    """Build the checker wheel via `./pants setup-py` and return its path.

    Raises AssertionError if no .whl file shows up under root_dir.
    """
    target = Checkstyle._CHECKER_ADDRESS_SPEC
    subprocess.check_call([os.path.join(get_buildroot(), 'pants'),
                           '--pants-distdir={}'.format(root_dir),
                           'setup-py',
                           '--run=bdist_wheel --universal',
                           target])
    for root, _, files in os.walk(root_dir):
      for f in files:
        if f.endswith('.whl'):
          return os.path.join(root, f)
    raise AssertionError('Failed to generate a wheel for {}'.format(target))
  @staticmethod
  def install_wheel(wheel, root_dir):
    """Install `wheel` under root_dir and return the sys.path-importable dir."""
    importable_path = os.path.join(root_dir, 'install', os.path.basename(wheel))
    # Force every wheel category into the single importable directory.
    overrides = {path: importable_path
                 for path in ('purelib', 'platlib', 'headers', 'scripts', 'data')}
    WheelFile(wheel).install(force=True, overrides=overrides)
    return importable_path
  # Class-level fixtures populated by setUpClass and torn down in tearDownClass.
  _distdir = None
  _checker_dist = None
  _checker_dist_importable_path = None
  @classmethod
  def setUpClass(cls):
    cls._distdir = safe_mkdtemp()
    cls._checker_dist = cls.build_checker_wheel(cls._distdir)
    cls._checker_dist_importable_path = cls.install_wheel(cls._checker_dist, cls._distdir)
  @classmethod
  def tearDownClass(cls):
    if cls._distdir:
      safe_rmtree(cls._distdir)
  @classmethod
  def task_type(cls):
    # Required by PythonTaskTestBase: the task under test.
    return Checkstyle
  @contextmanager
  def resolve_configuration(self, resolve_local=False):
    """Configure how the checkstyle task finds the checker dist.

    resolve_local=True: expose the pre-installed wheel on sys.path and pin the
    interpreter constraint to the currently running interpreter.
    resolve_local=False: point the resolver at the distdir holding the wheel.
    """
    if resolve_local:
      # Ensure our checkstyle task runs under the same interpreter we are running under so that
      # local resolves find dists compatible with the current interpreter.
      current_interpreter = PythonInterpreter.get()
      constraint = '{}=={}'.format(current_interpreter.identity.interpreter,
                                   current_interpreter.identity.version_str)
      self.set_options_for_scope(PythonSetup.options_scope, interpreter_constraints=[constraint])
      prior = sys.path[:]
      sys.path.append(self._checker_dist_importable_path)
      try:
        yield
      finally:
        # Restore sys.path exactly as it was before the test mutated it.
        sys.path = prior
    else:
      self.set_options_for_scope(PythonRepos.options_scope,
                                 repos=[os.path.dirname(self._checker_dist)])
      self.set_options_for_scope(PythonSetup.options_scope,
                                 resolver_allow_prereleases=True)
      yield
  def execute_task(self, target_roots=None, resolve_local=False):
    """Run the Checkstyle task over target_roots and return its result."""
    with self.resolve_configuration(resolve_local=resolve_local):
      # PANTS_DEV=None clears dev-mode; PEX_VERBOSE aids debugging resolves.
      with environment_as(PANTS_DEV=None, PEX_VERBOSE='9'):
        context = self.context(target_roots=target_roots)
        return self.create_task(context).execute()
  def create_py2_failing_target(self):
    # Has 4 lint errors
    self.create_file('a/python/fail_py2.py', contents=dedent("""
            x=2+3
            print x+7
          """))
    return self.make_target('a/python:fail2', PythonLibrary, sources=['fail_py2.py'],
                            compatibility=[self.py2_constraint])
  def create_py3_failing_target(self):
    # Has 3 lint errors
    self.create_file('a/python/fail_py3.py', contents=dedent("""
            x=2+3
            print(x+7)
          """))
    return self.make_target('a/python:fail3', PythonLibrary, sources=['fail_py3.py'],
                            compatibility=[self.py3_constraint])
  @parameterized.expand(CHECKER_RESOLVE_METHOD)
  def test_no_sources(self, unused_test_name, resolve_local):
    # No targets at all: task exits cleanly with 0.
    self.assertEqual(0, self.execute_task(resolve_local=resolve_local))
  @parameterized.expand(CHECKER_RESOLVE_METHOD)
  def test_pass(self, unused_test_name, resolve_local):
    self.create_file('a/python/pass.py', contents=dedent("""
                       class UpperCase(object):
                         pass
                     """))
    target = self.make_target('a/python:pass', PythonLibrary, sources=['pass.py'])
    self.assertEqual(0, self.execute_task(target_roots=[target], resolve_local=resolve_local))
  @parameterized.expand(CHECKER_RESOLVE_METHOD)
  def test_failure(self, unused_test_name, resolve_local):
    self.create_file('a/python/fail.py', contents=dedent("""
                        class lower_case(object):
                          pass
                     """))
    target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
    with self.assertRaises(TaskError) as task_error:
      self.execute_task(target_roots=[target], resolve_local=resolve_local)
    self.assertIn('1 Python Style issues found', str(task_error.exception))
  @parameterized.expand(CHECKER_RESOLVE_METHOD)
  def test_suppressed_file_passes(self, unused_test_name, resolve_local):
    self.create_file('a/python/fail.py', contents=dedent("""
                        class lower_case(object):
                          pass
                     """))
    # Suppression file maps a file regex to the check category to silence.
    suppression_file = self.create_file('suppress.txt', contents=dedent("""
    a/python/fail\.py::variable-names"""))
    target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
    self.set_options(suppress=suppression_file)
    self.assertEqual(0, self.execute_task(target_roots=[target], resolve_local=resolve_local))
  @parameterized.expand(CHECKER_RESOLVE_METHOD)
  def test_failure_fail_false(self, unused_test_name, resolve_local):
    self.create_file('a/python/fail.py', contents=dedent("""
                        class lower_case(object):
                          pass
                     """))
    target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
    # With fail=False the task reports the issue count instead of raising.
    self.set_options(fail=False)
    self.assertEqual(1, self.execute_task(target_roots=[target], resolve_local=resolve_local))
  @parameterized.expand(CHECKER_RESOLVE_METHOD)
  def test_syntax_error(self, unused_test_name, resolve_local):
    self.create_file('a/python/error.py', contents=dedent("""
                      invalid python
                     """))
    target = self.make_target('a/python:error', PythonLibrary, sources=['error.py'])
    self.set_options(fail=False)
    self.assertEqual(1, self.execute_task(target_roots=[target], resolve_local=resolve_local))
  def test_lint_runs_for_blanket_whitelist(self):
    # Empty whitelist means every constraint is whitelisted: both targets lint.
    target_py2 = self.create_py2_failing_target()
    target_py3 = self.create_py3_failing_target()
    self.set_options(interpreter_constraints_whitelist=[])
    with self.assertRaises(TaskError) as task_error:
      self.execute_task(target_roots=[target_py2, target_py3])
    self.assertIn('7 Python Style issues found', str(task_error.exception))
  def test_lint_runs_for_single_whitelisted_constraints(self):
    target_py3 = self.create_py3_failing_target()
    self.set_options(interpreter_constraints_whitelist=[self.py3_constraint])
    with self.assertRaises(TaskError) as task_error:
      self.execute_task(target_roots=[target_py3])
    self.assertIn('3 Python Style issues found', str(task_error.exception))
  def test_lint_runs_for_multiple_whitelisted_constraints(self):
    target_py2 = self.create_py2_failing_target()
    target_py3 = self.create_py3_failing_target()
    self.set_options(interpreter_constraints_whitelist=[self.py2_constraint, self.py3_constraint])
    with self.assertRaises(TaskError) as task_error:
      self.execute_task(target_roots=[target_py2, target_py3])
    self.assertIn('7 Python Style issues found', str(task_error.exception))
  def test_lint_runs_for_default_constraints_and_matching_whitelist(self):
    # py2 target matches default constraints; py3 matches the whitelist entry,
    # so both sets of failures are reported together.
    target_py2 = self.create_py2_failing_target()
    target_py3 = self.create_py3_failing_target()
    self.set_options(interpreter_constraints_whitelist=[self.py3_constraint])
    with self.assertRaises(TaskError) as task_error:
      self.execute_task(target_roots=[target_py2, target_py3])
    self.assertIn('7 Python Style issues found', str(task_error.exception))
| |
# Copyright 2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from datetime import datetime
from functools import wraps
import jsonpickle
from flask import Blueprint, Response, request
from werkzeug.wrappers import BaseResponse
from almanach.common.exceptions.almanach_entity_not_found_exception import AlmanachEntityNotFoundException
from almanach.common.exceptions.authentication_failure_exception import AuthenticationFailureException
from almanach.common.exceptions.multiple_entities_matching_query import MultipleEntitiesMatchingQuery
from almanach.common.exceptions.validation_exception import InvalidAttributeException
from almanach.common.exceptions.date_format_exception import DateFormatException
# Flask blueprint exposing the almanach HTTP API.
api = Blueprint("api", __name__)
# Injected at application bootstrap: the domain controller that performs all
# storage operations, and the adapter that validates X-Auth-Token headers.
controller = None
auth_adapter = None
def to_json(api_call):
    """Decorator serializing a view function's return value to a JSON response.

    A result that is already a werkzeug response passes through untouched;
    anything else is jsonpickle-encoded into a 200 JSON response.  Known
    domain exceptions are translated into 400/404 JSON error payloads, and
    any other exception becomes a logged 500.
    """

    def encode(data):
        # unpicklable=False emits plain JSON without py/object type tags.
        return jsonpickle.encode(data, unpicklable=False)

    @wraps(api_call)
    def decorator(*args, **kwargs):
        try:
            result = api_call(*args, **kwargs)
            return result if isinstance(result, BaseResponse) \
                else Response(encode(result), 200, {"Content-Type": "application/json"})
        except DateFormatException as e:
            # str(e) instead of the deprecated Python-2-only e.message
            # (BaseException.message was removed in Python 3).
            logging.warning(str(e))
            return Response(encode({"error": str(e)}), 400, {"Content-Type": "application/json"})
        except KeyError as e:
            # e.args[0] is the missing key itself; str(e) would add quotes.
            message = "The '{param}' param is mandatory for the request you have made.".format(param=e.args[0])
            logging.warning(message)
            return encode({"error": message}), 400, {"Content-Type": "application/json"}
        except TypeError:
            message = "The request you have made must have data. None was given."
            logging.warning(message)
            return encode({"error": message}), 400, {"Content-Type": "application/json"}
        except InvalidAttributeException as e:
            logging.warning(e.get_error_message())
            return encode({"error": e.get_error_message()}), 400, {"Content-Type": "application/json"}
        except MultipleEntitiesMatchingQuery as e:
            logging.warning(str(e))
            return encode({"error": "Multiple entities found while updating closed"}), 400, {
                "Content-Type": "application/json"}
        except AlmanachEntityNotFoundException as e:
            logging.warning(str(e))
            return encode({"error": "Entity not found"}), 404, {"Content-Type": "application/json"}
        except Exception as e:
            # Unexpected failure: log the traceback and answer 500.
            logging.exception(e)
            return Response(encode({"error": str(e)}), 500, {"Content-Type": "application/json"})

    return decorator
def authenticated(api_call):
    """Decorator rejecting requests that lack a valid X-Auth-Token header.

    Token validation is delegated to the module-level auth_adapter; on
    failure the view is never invoked and a plain 401 is returned.
    """
    @wraps(api_call)
    def decorator(*args, **kwargs):
        try:
            auth_adapter.validate(request.headers.get('X-Auth-Token'))
            return api_call(*args, **kwargs)
        except AuthenticationFailureException as e:
            # str(e) instead of the Python-2-only e.message attribute.
            logging.error("Authentication failure: {0}".format(str(e)))
            return Response('Unauthorized', 401)
    return decorator
@api.route("/info", methods=["GET"])
@to_json
def get_info():
    """Return general application information from the controller."""
    logging.info("Get application info")
    info = controller.get_application_info()
    return info
@api.route("/project/<project_id>/instance", methods=["POST"])
@authenticated
@to_json
def create_instance(project_id):
    """Record a new instance for the tenant from the JSON request body."""
    payload = json.loads(request.data)
    logging.info("Creating instance for tenant %s with data %s", project_id, payload)
    controller.create_instance(
        tenant_id=project_id,
        instance_id=payload['id'],
        create_date=payload['created_at'],
        flavor=payload['flavor'],
        os_type=payload['os_type'],
        distro=payload['os_distro'],
        version=payload['os_version'],
        name=payload['name'],
        metadata={}
    )
    return Response(status=201)
@api.route("/instance/<instance_id>", methods=["DELETE"])
@authenticated
@to_json
def delete_instance(instance_id):
    """Mark an instance deleted at the date given in the request body."""
    payload = json.loads(request.data)
    logging.info("Deleting instance with id %s with data %s", instance_id, payload)
    controller.delete_instance(instance_id=instance_id, delete_date=payload['date'])
    return Response(status=202)
@api.route("/instance/<instance_id>/resize", methods=["PUT"])
@authenticated
@to_json
def resize_instance(instance_id):
    """Record a flavor change for an instance."""
    payload = json.loads(request.data)
    logging.info("Resizing instance with id %s with data %s", instance_id, payload)
    controller.resize_instance(instance_id=instance_id,
                               resize_date=payload['date'],
                               flavor=payload['flavor'])
    return Response(status=200)
@api.route("/instance/<instance_id>/rebuild", methods=["PUT"])
@authenticated
@to_json
def rebuild_instance(instance_id):
    """Record an OS rebuild (distro/version/type change) for an instance."""
    payload = json.loads(request.data)
    logging.info("Rebuilding instance with id %s with data %s", instance_id, payload)
    controller.rebuild_instance(instance_id=instance_id,
                                distro=payload['distro'],
                                version=payload['version'],
                                os_type=payload['os_type'],
                                rebuild_date=payload['rebuild_date'])
    return Response(status=200)
@api.route("/project/<project_id>/instances", methods=["GET"])
@authenticated
@to_json
def list_instances(project_id):
    """List the tenant's instances active during the requested period."""
    period_start, period_end = get_period()
    logging.info("Listing instances between %s and %s", period_start, period_end)
    return controller.list_instances(project_id, period_start, period_end)
@api.route("/project/<project_id>/volume", methods=["POST"])
@authenticated
@to_json
def create_volume(project_id):
    """Record a new volume for the tenant from the JSON request body."""
    payload = json.loads(request.data)
    logging.info("Creating volume for tenant %s with data %s", project_id, payload)
    controller.create_volume(
        project_id=project_id,
        volume_id=payload['volume_id'],
        start=payload['start'],
        volume_type=payload['volume_type'],
        size=payload['size'],
        volume_name=payload['volume_name'],
        attached_to=payload['attached_to']
    )
    return Response(status=201)
@api.route("/volume/<volume_id>", methods=["DELETE"])
@authenticated
@to_json
def delete_volume(volume_id):
    """Mark a volume deleted at the date given in the request body."""
    payload = json.loads(request.data)
    logging.info("Deleting volume with id %s with data %s", volume_id, payload)
    controller.delete_volume(volume_id=volume_id, delete_date=payload['date'])
    return Response(status=202)
@api.route("/volume/<volume_id>/resize", methods=["PUT"])
@authenticated
@to_json
def resize_volume(volume_id):
    """Record a size change for a volume."""
    payload = json.loads(request.data)
    logging.info("Resizing volume with id %s with data %s", volume_id, payload)
    controller.resize_volume(volume_id=volume_id,
                             size=payload['size'],
                             update_date=payload['date'])
    return Response(status=200)
@api.route("/volume/<volume_id>/attach", methods=["PUT"])
@authenticated
@to_json
def attach_volume(volume_id):
    """Record the given attachments for a volume."""
    payload = json.loads(request.data)
    logging.info("Attaching volume with id %s with data %s", volume_id, payload)
    controller.attach_volume(volume_id=volume_id,
                             date=payload['date'],
                             attachments=payload['attachments'])
    return Response(status=200)
@api.route("/volume/<volume_id>/detach", methods=["PUT"])
@authenticated
@to_json
def detach_volume(volume_id):
    """Record the removal of the given attachments from a volume."""
    payload = json.loads(request.data)
    logging.info("Detaching volume with id %s with data %s", volume_id, payload)
    controller.detach_volume(volume_id=volume_id,
                             date=payload['date'],
                             attachments=payload['attachments'])
    return Response(status=200)
@api.route("/project/<project_id>/volumes", methods=["GET"])
@authenticated
@to_json
def list_volumes(project_id):
    """List the tenant's volumes active during the requested period."""
    period_start, period_end = get_period()
    logging.info("Listing volumes between %s and %s", period_start, period_end)
    return controller.list_volumes(project_id, period_start, period_end)
@api.route("/project/<project_id>/entities", methods=["GET"])
@authenticated
@to_json
def list_entity(project_id):
    """List all of the tenant's entities active during the requested period."""
    period_start, period_end = get_period()
    logging.info("Listing entities between %s and %s", period_start, period_end)
    return controller.list_entities(project_id, period_start, period_end)
@api.route("/entity/instance/<instance_id>", methods=["PUT"])
@authenticated
@to_json
def update_instance_entity(instance_id):
    """Update an instance entity.

    With a 'start' query argument the closed (inactive) period is updated;
    otherwise the currently active entity is updated.
    """
    payload = json.loads(request.data)
    logging.info("Updating instance entity with id %s with data %s", instance_id, payload)
    if 'start' not in request.args:
        return controller.update_active_instance_entity(instance_id=instance_id, **payload)
    start, end = get_period()
    return controller.update_inactive_entity(instance_id=instance_id, start=start, end=end, **payload)
@api.route("/entity/<entity_id>", methods=["HEAD"])
@authenticated
def entity_exists(entity_id):
    """Answer 200 when the entity exists and 404 otherwise (HEAD probe)."""
    logging.info("Does entity with id %s exists", entity_id)
    status = 200 if controller.entity_exists(entity_id=entity_id) else 404
    return Response('', status)
@api.route("/entity/<entity_id>", methods=["GET"])
@authenticated
@to_json
def get_entity(entity_id):
    """Return every entity version recorded under the given id."""
    entities = controller.get_all_entities_by_id(entity_id)
    return entities
@api.route("/volume_types", methods=["GET"])
@authenticated
@to_json
def list_volume_types():
    """Return every known volume type."""
    logging.info("Listing volumes types")
    return controller.list_volume_types()
@api.route("/volume_type/<type_id>", methods=["GET"])
@authenticated
@to_json
def get_volume_type(type_id):
    """Return the volume type identified by type_id."""
    logging.info("Get volumes type for id %s", type_id)
    volume_type = controller.get_volume_type(type_id)
    return volume_type
@api.route("/volume_type", methods=["POST"])
@authenticated
@to_json
def create_volume_type():
    """Register a new volume type from the JSON request body."""
    payload = json.loads(request.data)
    logging.info("Creating volume type with data '%s'", payload)
    controller.create_volume_type(volume_type_id=payload['type_id'],
                                  volume_type_name=payload['type_name'])
    return Response(status=201)
@api.route("/volume_type/<type_id>", methods=["DELETE"])
@authenticated
@to_json
def delete_volume_type(type_id):
    """Remove the volume type identified by type_id."""
    logging.info("Deleting volume type with id '%s'", type_id)
    controller.delete_volume_type(type_id)
    return Response(status=202)
def get_period():
    """Parse the request's 'start'/'end' query args into a datetime pair.

    'start' is mandatory (KeyError propagates if absent); 'end' defaults to
    the current time when omitted.
    """
    time_format = "%Y-%m-%d %H:%M:%S.%f"
    start = datetime.strptime(request.args["start"], time_format)
    if "end" in request.args:
        end = datetime.strptime(request.args["end"], time_format)
    else:
        end = datetime.now()
    return start, end
| |
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.parties.PartyCog
from panda3d.core import AnimControl, CollideMask, CollisionNode, CollisionSphere, CollisionTube, Mat4, NodePath, Point3, TextNode, Vec4
import math
from direct.actor.Actor import Actor
from direct.interval.ActorInterval import ActorInterval
from direct.interval.MetaInterval import Sequence, Parallel
from direct.interval.FunctionInterval import Func, Wait
from direct.interval.SoundInterval import SoundInterval
from direct.interval.LerpInterval import LerpScaleInterval, LerpFunc
from direct.showbase.PythonUtil import bound as clamp
from direct.task import Task
from direct.fsm.FSM import FSM
from toontown.toonbase import ToontownGlobals
from toontown.battle.BattleProps import globalPropPool
from toontown.battle.BattleSounds import globalBattleSoundCache
import PartyGlobals
class PartyCogManager:
    """Creates and tracks the pinata cogs used by the party cog activity."""

    def __init__(self):
        # Every cog created through generateCog, in creation order.
        self.cogs = []

    def generateCog(self, parentNode, bounceSpeed = 3, bounceHeight = 1, rotateSpeed = 1, heightShift = 1, xMoveSpeed = 0, xMoveDistance = 0, bounceOffset = 0):
        """Build a new PartyCog parented to parentNode, register and return it."""
        newCog = PartyCog(parentNode, len(self.cogs), bounceSpeed, bounceHeight, rotateSpeed, heightShift, xMoveSpeed, xMoveDistance, bounceOffset)
        self.cogs.append(newCog)
        return newCog

    def unload(self):
        """Tear down every managed cog."""
        for managedCog in self.cogs:
            managedCog.unload()

    def updateDistances(self, distances):
        """Push one spline distance per cog, matched by index."""
        for index, distance in enumerate(distances):
            self.cogs[index].updateDistance(distance)
class PartyCog(FSM):
    """A pinata cog target that pops out of a hole, sways, and reacts to pie hits.

    FSM states used here: 'Static', 'Active', 'Down' (plus the implicit 'Off').
    NOTE(review): `directNotify` is not imported in this module; presumably it
    is installed into builtins by the engine at startup -- confirm.
    """
    notify = directNotify.newCategory('PartyCog')
    # Shared TextNode used to generate the floating hit-score text.
    HpTextGenerator = TextNode('HpTextGenerator')
    hpText = None
    # Approximate cog height, used to position the hit-score text.
    height = 7
    def __init__(self, parentNode, id, bounceSpeed = 3, bounceHeight = 1, rotateSpeed = 1, heightShift = 1, xMoveSpeed = 0, xMoveDistance = 0, bounceOffset = 0):
        """Load assets and start in the 'Down' state under parentNode."""
        self.id = id
        FSM.__init__(self, 'PartyCogFSM-%d' % self.id)
        self.showFacingStatus = False
        # Motion tuning parameters supplied by the manager.
        self.xMoveSpeed = xMoveSpeed
        self.xMoveDistance = xMoveDistance
        self.heightShift = heightShift
        self.bounceSpeed = bounceSpeed
        self.bounceHeight = bounceHeight
        self.rotateSpeed = rotateSpeed
        self.parentNode = parentNode
        self.bounceOffset = bounceOffset
        # Interval handles, cleaned up in unload().
        self.hitInterval = None
        self.kaboomTrack = None
        self.resetRollIval = None
        # Timestamp of the most recent pie hit that was acted upon.
        self.netTimeSentToStartByHit = 0
        self.load()
        self.request('Down')
        return
    def load(self):
        """Build the actor, collision solids, splat/sound assets and ground hole."""
        self.root = NodePath('PartyCog-%d' % self.id)
        self.root.reparentTo(self.parentNode)
        path = 'phase_13/models/parties/cogPinata_'
        self.actor = Actor(path + 'actor', {'idle': path + 'idle_anim',
         'down': path + 'down_anim',
         'up': path + 'up_anim',
         'bodyHitBack': path + 'bodyHitBack_anim',
         'bodyHitFront': path + 'bodyHitFront_anim',
         'headHitBack': path + 'headHitBack_anim',
         'headHitFront': path + 'headHitFront_anim'})
        self.actor.reparentTo(self.root)
        # Scratch matrix + locator used by getHeadLocation().
        self.temp_transform = Mat4()
        self.head_locator = self.actor.attachNewNode('temphead')
        # Pie-collision solids: body tube, head tube, and one sphere per arm.
        self.bodyColl = CollisionTube(0, 0, 1, 0, 0, 5.75, 0.75)
        self.bodyColl.setTangible(1)
        self.bodyCollNode = CollisionNode('PartyCog-%d-Body-Collision' % self.id)
        self.bodyCollNode.setCollideMask(ToontownGlobals.PieBitmask)
        self.bodyCollNode.addSolid(self.bodyColl)
        self.bodyCollNodePath = self.root.attachNewNode(self.bodyCollNode)
        self.headColl = CollisionTube(0, 0, 3, 0, 0, 3.0, 1.5)
        self.headColl.setTangible(1)
        self.headCollNode = CollisionNode('PartyCog-%d-Head-Collision' % self.id)
        self.headCollNode.setCollideMask(ToontownGlobals.PieBitmask)
        self.headCollNode.addSolid(self.headColl)
        self.headCollNodePath = self.root.attachNewNode(self.headCollNode)
        self.arm1Coll = CollisionSphere(1.65, 0, 3.95, 1.0)
        self.arm1Coll.setTangible(1)
        self.arm1CollNode = CollisionNode('PartyCog-%d-Arm1-Collision' % self.id)
        self.arm1CollNode.setCollideMask(ToontownGlobals.PieBitmask)
        self.arm1CollNode.addSolid(self.arm1Coll)
        self.arm1CollNodePath = self.root.attachNewNode(self.arm1CollNode)
        self.arm2Coll = CollisionSphere(-1.65, 0, 3.45, 1.0)
        self.arm2Coll.setTangible(1)
        self.arm2CollNode = CollisionNode('PartyCog-%d-Arm2-Collision' % self.id)
        self.arm2CollNode.setCollideMask(ToontownGlobals.PieBitmask)
        self.arm2CollNode.addSolid(self.arm2Coll)
        self.arm2CollNodePath = self.root.attachNewNode(self.arm2CollNode)
        splatName = 'splat-creampie'
        self.splat = globalPropPool.getProp(splatName)
        self.splat.setBillboardPointEye()
        self.splatType = globalPropPool.getPropType(splatName)
        self.pieHitSound = globalBattleSoundCache.getSound('AA_wholepie_only.ogg')
        self.upSound = globalBattleSoundCache.getSound('AV_jump_to_side.ogg')
        # Translucent hole the cog pops in and out of; drawn in the ground bin.
        self.hole = loader.loadModel('phase_13/models/parties/cogPinataHole')
        self.hole.setTransparency(True)
        self.hole.setP(-90.0)
        self.hole.setScale(3)
        self.hole.setBin('ground', 3)
        self.hole.reparentTo(self.parentNode)
    def unload(self):
        """Stop all intervals, detach scene nodes and release sound references."""
        self.request('Off')
        self.clearHitInterval()
        if self.hole is not None:
            self.hole.removeNode()
            self.hole = None
        if self.actor is not None:
            self.actor.cleanup()
            self.actor.removeNode()
            self.actor = None
        if self.root is not None:
            self.root.removeNode()
            self.root = None
        # finish() (not pause) so any cleanup Funcs inside the tracks still run.
        if self.kaboomTrack is not None and self.kaboomTrack.isPlaying():
            self.kaboomTrack.finish()
        self.kaboomTrack = None
        if self.resetRollIval is not None and self.resetRollIval.isPlaying():
            self.resetRollIval.finish()
        self.resetRollIval = None
        if self.hitInterval is not None and self.hitInterval.isPlaying():
            self.hitInterval.finish()
        self.hitInterval = None
        del self.upSound
        del self.pieHitSound
        return
    def enterStatic(self):
        # FSM state: idle display, nothing to do.
        pass
    def exitStatic(self):
        pass
    def enterActive(self, startTime):
        """FSM state: start the per-frame update task, synced to startTime."""
        self.root.setR(0.0)
        updateTask = Task.Task(self.updateTask)
        updateTask.startTime = startTime
        taskMgr.add(updateTask, 'PartyCog.update-%d' % self.id)
    def exitActive(self):
        """Stop update/bounce tasks and smoothly roll the cog back upright."""
        taskMgr.remove('PartyCog.update-%d' % self.id)
        taskMgr.remove('PartyCog.bounceTask-%d' % self.id)
        self.clearHitInterval()
        self.resetRollIval = self.root.hprInterval(0.5, Point3(self.root.getH(), 0.0, 0.0), blendType='easeInOut')
        self.resetRollIval.start()
        self.actor.stop()
    def enterDown(self):
        """FSM state: sink the cog into its hole (with hole-grow/shrink effect)."""
        if self.oldState == 'Off':
            # Entering from Off: jump straight to the final 'down' pose.
            downAnimControl = self.actor.getAnimControl('down')
            self.actor.pose('down', downAnimControl.getNumFrames() - 1)
            return
        self.clearHitInterval()
        startScale = self.hole.getScale()
        endScale = Point3(5, 5, 5)
        self.hitInterval = Sequence(LerpFunc(self.setAlongSpline, duration=1.0, fromData=self.currentT, toData=0.0), LerpScaleInterval(self.hole, duration=0.175, scale=endScale, startScale=startScale, blendType='easeIn'), Parallel(SoundInterval(self.upSound, volume=0.6, node=self.actor, cutOff=PartyGlobals.PARTY_COG_CUTOFF), ActorInterval(self.actor, 'down', loop=0)), LerpScaleInterval(self.hole, duration=0.175, scale=Point3(3, 3, 3), startScale=endScale, blendType='easeOut'))
        self.hitInterval.start()
    def exitDown(self):
        """Pop the cog back up out of its hole, resetting orientation/position."""
        self.root.setR(0.0)
        self.root.setH(0.0)
        self.targetDistance = 0.0
        self.targetFacing = 0.0
        self.currentT = 0.0
        self.setAlongSpline(0.0)
        self.clearHitInterval()
        startScale = self.hole.getScale()
        endScale = Point3(5, 5, 5)
        self.hitInterval = Sequence(LerpScaleInterval(self.hole, duration=0.175, scale=endScale, startScale=startScale, blendType='easeIn'), Parallel(SoundInterval(self.upSound, volume=0.6, node=self.actor, cutOff=PartyGlobals.PARTY_COG_CUTOFF), ActorInterval(self.actor, 'up', loop=0)), Func(self.actor.loop, 'idle'), LerpScaleInterval(self.hole, duration=0.175, scale=Point3(3, 3, 3), startScale=endScale, blendType='easeOut'))
        self.hitInterval.start()
    def filterDown(self, request, args):
        # Ignore redundant Down->Down transitions; defer everything else.
        if request == 'Down':
            return None
        else:
            return self.defaultFilter(request, args)
        return None
    def setEndPoints(self, start, end, amplitude = 1.7):
        """Define the sinusoidal path the cog slides along between start and end."""
        self.sinAmplitude = amplitude
        self.sinPeriod = (end.getX() - start.getX()) / 2
        self.sinDisplacement = start.getY()
        self.startPoint = start
        self.endPoint = end
        self.currentT = 0.0
        self.targetDistance = 0.0
        self.currentFacing = 0.0
        self.targetFacing = 0.0
        self.setAlongSpline(self.currentT)
        # Keep the hole under the cog, just above the ground plane.
        self.hole.setPos(self.root.getPos())
        self.hole.setZ(0.02)
    def rockBackAndForth(self, task):
        """Task helper: sway the cog's roll sinusoidally over time."""
        t = task.startTime + task.time
        angle = math.sin(t) * 20.0
        self.root.setR(angle)
        return task.cont
    def updateDistance(self, distance):
        # Distance along the spline in [-1, 1]; approached gradually by updateTask.
        self.targetDistance = clamp(distance, -1.0, 1.0)
    def updateTask(self, task):
        """Per-frame task: sway, ease toward targetDistance, and turn to face travel."""
        self.rockBackAndForth(task)
        # Step currentT toward targetDistance by at most 0.01 per frame.
        if self.targetDistance > self.currentT:
            self.currentT += min(0.01, self.targetDistance - self.currentT)
            self.setAlongSpline(self.currentT)
        elif self.targetDistance < self.currentT:
            self.currentT += max(-0.01, self.targetDistance - self.currentT)
            self.setAlongSpline(self.currentT)
        # Face the direction of travel; ease heading by at most 10 deg per frame.
        if self.currentT < 0.0:
            self.targetFacing = -90.0
        elif self.currentT > 0.0:
            self.targetFacing = 90.0
        else:
            self.targetFacing = 0.0
        if self.targetFacing > self.currentFacing:
            self.currentFacing += min(10, self.targetFacing - self.currentFacing)
        elif self.targetFacing < self.currentFacing:
            self.currentFacing += max(-10, self.targetFacing - self.currentFacing)
        self.root.setH(self.currentFacing)
        return task.cont
    def setAlongSpline(self, t):
        """Place the cog at parameter t in [-1, 1] along its sinusoidal path."""
        t = t + 1.0
        dist = (self.endPoint.getX() - self.startPoint.getX()) / 2.0
        x = self.startPoint.getX() + t * dist
        y = self.startPoint.getY() - math.sin(t * 2 * math.pi) * self.sinAmplitude
        self.root.setPos(x, y, 0)
    def startBounce(self):
        """Begin the vertical bobbing task."""
        taskMgr.add(self.bounce, 'PartyCog.bounceTask-%d' % self.id)
    def bounce(self, task):
        # Sinusoidal vertical bob, phase-shifted by bounceOffset.
        self.root.setZ(math.sin((self.bounceOffset + task.time) * self.bounceSpeed) * self.bounceHeight + self.heightShift)
        return task.cont
    def setPos(self, position):
        self.root.setPos(position)
    def respondToPieHit(self, timestamp, position, hot = False, direction = 1.0):
        """Show a splat for a pie hit, ignoring hits older than the last one seen."""
        if self.netTimeSentToStartByHit < timestamp:
            self.__showSplat(position, direction, hot)
            if self.netTimeSentToStartByHit < timestamp:
                self.netTimeSentToStartByHit = timestamp
        else:
            self.notify.debug('respondToPieHit self.netTimeSentToStartByHit = %s' % self.netTimeSentToStartByHit)
    def clearHitInterval(self):
        """Rewind and stop any running hit-reaction interval."""
        if self.hitInterval is not None and self.hitInterval.isPlaying():
            self.hitInterval.clearToInitial()
        return
    def __showSplat(self, position, direction, hot = False):
        """Play the splat prop, hit animation and sound for a pie impact.

        `direction` selects the team splat color and, combined with the cog's
        current facing, whether the front or back hit animation plays; `hot`
        hits use the head animation and a larger splat.
        """
        if self.kaboomTrack is not None and self.kaboomTrack.isPlaying():
            self.kaboomTrack.finish()
        self.clearHitInterval()
        splatName = 'splat-creampie'
        self.splat = globalPropPool.getProp(splatName)
        self.splat.setBillboardPointEye()
        self.splat.reparentTo(render)
        self.splat.setPos(self.root, position)
        self.splat.setAlphaScale(1.0)
        if not direction == 1.0:
            self.splat.setColorScale(PartyGlobals.CogActivitySplatColors[0])
            if self.currentFacing > 0.0:
                facing = 'HitFront'
            else:
                facing = 'HitBack'
        else:
            self.splat.setColorScale(PartyGlobals.CogActivitySplatColors[1])
            if self.currentFacing > 0.0:
                facing = 'HitBack'
            else:
                facing = 'HitFront'
        if hot:
            targetscale = 0.75
            part = 'head'
        else:
            targetscale = 0.5
            part = 'body'

        def setSplatAlpha(amount):
            self.splat.setAlphaScale(amount)

        self.hitInterval = Sequence(ActorInterval(self.actor, part + facing, loop=0), Func(self.actor.loop, 'idle'))
        self.hitInterval.start()
        self.kaboomTrack = Parallel(SoundInterval(self.pieHitSound, volume=1.0, node=self.actor, cutOff=PartyGlobals.PARTY_COG_CUTOFF), Sequence(Func(self.splat.showThrough), Parallel(Sequence(LerpScaleInterval(self.splat, duration=0.175, scale=targetscale, startScale=Point3(0.1, 0.1, 0.1), blendType='easeOut'), Wait(0.175)), Sequence(Wait(0.1), LerpFunc(setSplatAlpha, duration=1.0, fromData=1.0, toData=0.0, blendType='easeOut'))), Func(self.splat.cleanup), Func(self.splat.removeNode)))
        self.kaboomTrack.start()
        return
    def showHitScore(self, number, scale = 1):
        """Float a '+N' score text above the cog, fading it out after a beat."""
        if number <= 0:
            return
        if self.hpText:
            self.hideHitScore()
        self.HpTextGenerator.setFont(ToontownGlobals.getSignFont())
        # NOTE(review): this branch is unreachable -- number <= 0 returned above.
        if number < 0:
            self.HpTextGenerator.setText(str(number))
        else:
            self.HpTextGenerator.setText('+' + str(number))
        self.HpTextGenerator.clearShadow()
        self.HpTextGenerator.setAlign(TextNode.ACenter)
        r = 1
        g = 1
        b = 0
        a = 1
        self.HpTextGenerator.setTextColor(r, g, b, a)
        self.hpTextNode = self.HpTextGenerator.generate()
        self.hpText = render.attachNewNode(self.hpTextNode)
        self.hpText.setScale(scale)
        self.hpText.setBillboardPointEye()
        self.hpText.setBin('fixed', 100)
        self.hpText.setPos(self.root, 0, 0, self.height / 2)
        # NOTE(review): `self.__hideHitScore` name-mangles to
        # `_PartyCog__hideHitScore`, but the method defined below is
        # `hideHitScore` -- this line raises AttributeError when executed.
        # It should read `Func(self.hideHitScore)`.
        seq = Sequence(self.hpText.posInterval(0.25, Point3(self.root.getX(render), self.root.getY(render), self.root.getZ(render) + self.height + 1.0), blendType='easeOut'), Wait(0.25), self.hpText.colorInterval(0.1, Vec4(r, g, b, 0)), Func(self.__hideHitScore))
        seq.start()
    def hideHitScore(self):
        """Remove the floating score text, if present."""
        if self.hpText:
            taskMgr.remove('PartyCogHpText' + str(self.id))
            self.hpText.removeNode()
            self.hpText = None
        return
    def getHeadLocation(self):
        """Return the head joint's Z height relative to the cog's root node."""
        self.actor.getJoints(jointName='head')[0].getNetTransform(self.temp_transform)
        self.head_locator.setMat(self.temp_transform)
        return self.head_locator.getZ(self.root)
| |
# -*- coding: utf-8 -*-
import re
from datetime import timedelta, date, datetime
import operator
import vim
from orgmode._vim import ORGMODE, echom, insert_at_cursor, get_user_input
from orgmode import settings
from orgmode.keybinding import Keybinding, Plug
from orgmode.menu import Submenu, ActionEntry, add_cmd_mapping_menu
from orgmode.py3compat.encode_compatibility import *
from orgmode.py3compat.unicode_compatibility import *
from orgmode.py3compat.py_py3_string import *
class Date(object):
    u"""
    Handles all date and timestamp related tasks.

    TODO: extend functionality (calendar, repetitions, ranges). See
    http://orgmode.org/guide/Dates-and-Times.html#Dates-and-Times
    """

    # Bare ISO date, e.g. 2011-09-12.
    date_regex = r"\d\d\d\d-\d\d-\d\d"
    # Tail of an active timestamp, e.g. "Mon 2011-09-12 10:20>".
    datetime_regex = r"[A-Z]\w\w \d\d\d\d-\d\d-\d\d \d\d:\d\d>"

    # Lowercase three-letter month abbreviation -> month number (1-12).
    month_mapping = {
        u'jan': 1, u'feb': 2, u'mar': 3, u'apr': 4, u'may': 5,
        u'jun': 6, u'jul': 7, u'aug': 8, u'sep': 9, u'oct': 10, u'nov': 11,
        u'dec': 12}

    def __init__(self):
        u""" Initialize plugin """
        object.__init__(self)
        # menu entries this plugin should create
        self.menu = ORGMODE.orgmenu + Submenu(u'Dates and Scheduling')

        # key bindings for this plugin
        # key bindings are also registered through the menu so only additional
        # bindings should be put in this variable
        self.keybindings = []

        # commands for this plugin
        self.commands = []

        # set speeddating format that is compatible with orgmode
        try:
            if int(vim.eval(u_encode(u'exists(":SpeedDatingFormat")'))) == 2:
                vim.command(u_encode(u':1SpeedDatingFormat %Y-%m-%d %a'))
                vim.command(u_encode(u':1SpeedDatingFormat %Y-%m-%d %a %H:%M'))
            else:
                echom(u'Speeddating plugin not installed. Please install it.')
        except:
            # vim.eval itself can fail (e.g. when the command is unavailable);
            # fall back to the same advisory message.
            echom(u'Speeddating plugin not installed. Please install it.')

    @classmethod
    def _modify_time(cls, startdate, modifier):
        u"""Modify the given startdate according to modifier. Return the new
        date or datetime.

        See http://orgmode.org/manual/The-date_002ftime-prompt.html

        NOTE(review): the patterns below are tried in sequence and each match
        overwrites ``newdate``, so a later pattern wins over an earlier one.
        If no pattern matches, ``newdate`` is never bound and the final
        ``try``/``except`` returns the unmodified ``startdate``.
        """
        # An empty modifier (or '.') means "keep the suggested date".
        if modifier is None or modifier == '' or modifier == '.':
            return startdate

        # rm crap from modifier
        modifier = modifier.strip()

        # '+' / '-' prefixes map to date arithmetic below.
        ops = {'-': operator.sub, '+': operator.add}

        # check real date
        date_regex = r"(\d\d\d\d)-(\d\d)-(\d\d)"
        match = re.search(date_regex, modifier)
        if match:
            year, month, day = match.groups()
            newdate = date(int(year), int(month), int(day))

        # check abbreviated date, seperated with '-'
        # (two-digit year is interpreted as 20xx)
        date_regex = u"(\d{1,2})-(\d+)-(\d+)"
        match = re.search(date_regex, modifier)
        if match:
            year, month, day = match.groups()
            newdate = date(2000 + int(year), int(month), int(day))

        # check abbreviated date, seperated with '/'
        # month/day
        date_regex = u"(\d{1,2})/(\d{1,2})"
        match = re.search(date_regex, modifier)
        if match:
            month, day = match.groups()
            newdate = date(startdate.year, int(month), int(day))
            # date should be always in the future
            if newdate < startdate:
                newdate = date(startdate.year + 1, int(month), int(day))

        # check full date, seperated with 'space'
        # month day year
        # 'sep 12 9' --> 2009 9 12
        date_regex = u"(\w\w\w) (\d{1,2}) (\d{1,2})"
        match = re.search(date_regex, modifier)
        if match:
            gr = match.groups()
            day = int(gr[1])
            month = int(cls.month_mapping[gr[0]])
            year = 2000 + int(gr[2])
            newdate = date(year, int(month), int(day))

        # check days as integers
        date_regex = u"^(\d{1,2})$"
        match = re.search(date_regex, modifier)
        if match:
            newday, = match.groups()
            newday = int(newday)
            if newday > startdate.day:
                # Same month, later day.
                newdate = date(startdate.year, startdate.month, newday)
            else:
                # TODO: DIRTY, fix this
                # this does NOT cover all edge cases
                newdate = startdate + timedelta(days=28)
                newdate = date(newdate.year, newdate.month, newday)

        # check for full days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
        modifier_lc = modifier.lower()
        match = re.search(u'mon|tue|wed|thu|fri|sat|sun', modifier_lc)
        if match:
            weekday_mapping = {
                u'mon': 0, u'tue': 1, u'wed': 2, u'thu': 3,
                u'fri': 4, u'sat': 5, u'sun': 6}
            # NOTE(review): looks up the whole lowercased modifier, so this
            # branch only works when the modifier is exactly a weekday name;
            # anything extra raises KeyError (swallowed by the final except).
            diff = (weekday_mapping[modifier_lc] - startdate.weekday()) % 7
            # use next weeks weekday if current weekday is the same as modifier
            if diff == 0:
                diff = 7
            newdate = startdate + timedelta(days=diff)

        # check for days modifier with appended d
        match = re.search(u'^(\+|-)(\d*)d', modifier)
        if match:
            op, days = match.groups()
            newdate = ops[op](startdate, timedelta(days=int(days)))

        # check for days modifier without appended d
        match = re.search(u'^(\+|-)(\d*) |^(\+|-)(\d*)$', modifier)
        if match:
            groups = match.groups()
            try:
                op = groups[0]
                days = int(groups[1])
            except:
                # The second alternative of the regex matched; use its groups.
                op = groups[2]
                days = int(groups[3])
            newdate = ops[op](startdate, timedelta(days=days))

        # check for week modifier
        match = re.search(u'^(\+|-)(\d+)w', modifier)
        if match:
            op, weeks = match.groups()
            newdate = ops[op](startdate, timedelta(weeks=int(weeks)))

        # check for month modifier
        # NOTE(review): plain month arithmetic; can produce an invalid month
        # (e.g. +2m in November), which raises and falls back to startdate.
        match = re.search(u'^(\+|-)(\d+)m', modifier)
        if match:
            op, months = match.groups()
            newdate = date(startdate.year, ops[op](startdate.month, int(months)),
                           startdate.day)

        # check for year modifier
        match = re.search(u'^(\+|-)(\d*)y', modifier)
        if match:
            op, years = match.groups()
            newdate = date(ops[op](startdate.year, int(years)), startdate.month,
                           startdate.day)

        # check for month day
        match = re.search(
            u'(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) (\d{1,2})',
            modifier.lower())
        if match:
            month = cls.month_mapping[match.groups()[0]]
            day = int(match.groups()[1])
            newdate = date(startdate.year, int(month), int(day))
            # date should be always in the future
            if newdate < startdate:
                newdate = date(startdate.year + 1, int(month), int(day))

        # check abbreviated date, seperated with '/'
        # month/day/year
        date_regex = u"(\d{1,2})/(\d+)/(\d+)"
        match = re.search(date_regex, modifier)
        if match:
            month, day, year = match.groups()
            newdate = date(2000 + int(year), int(month), int(day))

        # check for month day year
        # sep 12 2011
        match = re.search(
            u'(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) (\d{1,2}) (\d{1,4})',
            modifier.lower())
        if match:
            month = int(cls.month_mapping[match.groups()[0]])
            day = int(match.groups()[1])
            if len(match.groups()[2]) < 4:
                # Short year, e.g. '11' -> 2011.
                year = 2000 + int(match.groups()[2])
            else:
                year = int(match.groups()[2])
            newdate = date(year, month, day)

        # check for time: HH:MM
        # '12:45' --> datetime(2006, 06, 13, 12, 45))
        match = re.search(u'(\d{1,2}):(\d\d)$', modifier)
        if match:
            try:
                # Combine the time with whatever date was matched above.
                startdate = newdate
            except:
                # No date part matched; keep the original startdate.
                pass
            return datetime(
                startdate.year, startdate.month, startdate.day,
                int(match.groups()[0]), int(match.groups()[1]))

        try:
            return newdate
        except:
            # No pattern matched at all -> newdate is unbound (NameError).
            return startdate

    @classmethod
    def insert_timestamp(cls, active=True):
        u"""
        Insert a timestamp at the cursor position.

        TODO: show fancy calendar to pick the date from.
        TODO: add all modifier of orgmode.
        """
        today = date.today()
        msg = u''.join([
            u'Inserting ',
            unicode(u_decode(today.strftime(u'%Y-%m-%d %a'))),
            u' | Modify date'])
        modifier = get_user_input(msg)

        # abort if the user canceled the input promt
        if modifier is None:
            return

        newdate = cls._modify_time(today, modifier)

        # format — include the time only when _modify_time produced a datetime
        if isinstance(newdate, datetime):
            newdate = newdate.strftime(
                u_decode(u_encode(u'%Y-%m-%d %a %H:%M')))
        else:
            newdate = newdate.strftime(
                u_decode(u_encode(u'%Y-%m-%d %a')))

        # <...> is an active timestamp, [...] an inactive one.
        timestamp = u'<%s>' % newdate if active else u'[%s]' % newdate

        insert_at_cursor(timestamp)

    @classmethod
    def insert_timestamp_with_calendar(cls, active=True):
        u"""
        Insert a timestamp at the cursor position.

        Show fancy calendar to pick the date from.
        TODO: add all modifier of orgmode.
        """
        if int(vim.eval(u_encode(u'exists(":CalendarH")'))) != 2:
            vim.command("echo 'Please install plugin Calendar to enable this function'")
            return
        vim.command("CalendarH")

        # backup calendar_action so it can be restored after the pick
        calendar_action = vim.eval("g:calendar_action")
        vim.command("let g:org_calendar_action_backup = '" + calendar_action + "'")
        vim.command("let g:calendar_action = 'CalendarAction'")

        timestamp_template = u'<%s>' if active else u'[%s]'

        # timestamp template read back by the CalendarAction vim function
        vim.command("let g:org_timestamp_template = '" + timestamp_template + "'")

    def register(self):
        u"""
        Registration of the plugin.

        Key bindings and other initialization should be done here.
        """
        add_cmd_mapping_menu(
            self,
            name=u'OrgDateInsertTimestampActiveCmdLine',
            key_mapping=u'<localleader>sa',
            function=u'%s ORGMODE.plugins[u"Date"].insert_timestamp()' % VIM_PY_CALL,
            menu_desrc=u'Timest&'
        )
        add_cmd_mapping_menu(
            self,
            name=u'OrgDateInsertTimestampInactiveCmdLine',
            key_mapping='<localleader>si',
            function=u'%s ORGMODE.plugins[u"Date"].insert_timestamp(False)' % VIM_PY_CALL,
            menu_desrc=u'Timestamp (&inactive)'
        )
        add_cmd_mapping_menu(
            self,
            name=u'OrgDateInsertTimestampActiveWithCalendar',
            key_mapping=u'<localleader>pa',
            function=u'%s ORGMODE.plugins[u"Date"].insert_timestamp_with_calendar()' % VIM_PY_CALL,
            menu_desrc=u'Timestamp with Calendar'
        )
        add_cmd_mapping_menu(
            self,
            name=u'OrgDateInsertTimestampInactiveWithCalendar',
            key_mapping=u'<localleader>pi',
            function=u'%s ORGMODE.plugins[u"Date"].insert_timestamp_with_calendar(False)' % VIM_PY_CALL,
            menu_desrc=u'Timestamp with Calendar(inactive)'
        )

        submenu = self.menu + Submenu(u'Change &Date')
        submenu + ActionEntry(u'Day &Earlier', u'<C-x>', u'<C-x>')
        submenu + ActionEntry(u'Day &Later', u'<C-a>', u'<C-a>')
# vim: set noexpandtab:
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.util import wpr_modes
from page_sets.rendering import rendering_shared_state
from page_sets.rendering import rendering_story
from page_sets.rendering import story_tags
from page_sets.system_health import platforms
from page_sets.login_helpers import linkedin_login
from page_sets.login_helpers import google_login
class ToughPinchZoomPage(rendering_story.RenderingStory):
  """Abstract base story for desktop pinch-zoom rendering benchmarks.

  Subclasses provide BASE_NAME/YEAR/URL. RunPageInteractions pinches in to
  the page set's target scale factor, then pinches back out in half steps.
  """
  ABSTRACT_STORY = True
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  TAGS = [story_tags.GPU_RASTERIZATION, story_tags.TOUGH_PINCH_ZOOM]

  def __init__(self,
               page_set,
               name_suffix='',
               shared_page_state_class=(
                   rendering_shared_state.DesktopRenderingSharedState),
               extra_browser_args=None):
    super(ToughPinchZoomPage, self).__init__(
        page_set=page_set,
        name_suffix=name_suffix,
        extra_browser_args=extra_browser_args,
        shared_page_state_class=shared_page_state_class)
    # How far RunPageInteractions zooms in; supplied by the page set.
    self.target_scale_factor = page_set.target_scale_factor

  def RunPinchGesture(self, action_runner, left_anchor_ratio=0.5,
                      top_anchor_ratio=0.5, scale_factor=None,
                      speed_in_pixels_per_second=800):
    """Run one pinch gesture, wrapped in a repeatable GestureInteraction."""
    with action_runner.CreateGestureInteraction('PinchAction',
                                                repeatable=True):
      action_runner.PinchPage(
          left_anchor_ratio=left_anchor_ratio,
          top_anchor_ratio=top_anchor_ratio,
          scale_factor=scale_factor,
          speed_in_pixels_per_second=speed_in_pixels_per_second)

  def RunPageInteractions(self, action_runner):
    """Zoom in to target_scale_factor, then zoom back out in 0.5x steps."""
    action_runner.tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
    # BUGFIX: `xrange` does not exist on Python 3; `range(3)` behaves
    # identically here (3 iterations, so no memory concern on Python 2).
    for _ in range(3):
      current_scale_factor = self.target_scale_factor
      self.RunPinchGesture(action_runner, scale_factor=current_scale_factor)
      while current_scale_factor > 1.0:
        current_scale_factor *= 1 / 2.0
        self.RunPinchGesture(action_runner, scale_factor=1 / 2.0)
class GoogleSearchPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: top google property; a google tab is often open. """
  BASE_NAME = 'google_search_pinch'
  YEAR = '2018'
  URL = 'https://www.google.com/#hl=en&q=barack+obama'

  def RunNavigateSteps(self, action_runner):
    super(GoogleSearchPinchZoom2018Page, self).RunNavigateSteps(action_runner)
    # Wait for the results pager link so the page is rendered before pinching.
    action_runner.WaitForElement(text='Next')
class GmailPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: productivity, top google properties """
  BASE_NAME = 'gmail_pinch'
  YEAR = '2018'
  URL = 'https://mail.google.com/mail/'

  def RunNavigateSteps(self, action_runner):
    # Only perform the interactive login when not replaying a WPR archive.
    if self.wpr_mode != wpr_modes.WPR_REPLAY:
      google_login.NewLoginGoogleAccount(action_runner, 'googletest')
    super(GmailPinchZoom2018Page, self).RunNavigateSteps(action_runner)
    # Wait until the gmonkey API object and the top bar element exist.
    action_runner.WaitForJavaScriptCondition(
        'window.gmonkey !== undefined &&'
        'document.getElementById("gb") !== null')
class GoogleCalendarPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: productivity, top google properties """
  BASE_NAME = 'google_calendar_pinch'
  YEAR = '2018'
  URL = 'https://www.google.com/calendar/'

  def RunNavigateSteps(self, action_runner):
    # Only perform the interactive login when not replaying a WPR archive.
    if self.wpr_mode != wpr_modes.WPR_REPLAY:
      google_login.NewLoginGoogleAccount(action_runner, 'googletest')
    super(GoogleCalendarPinchZoom2018Page, self).RunNavigateSteps(
        action_runner)
    # Wait for a calendar UI element before starting the pinch gestures.
    action_runner.WaitForElement('span[class~="sm8sCf"]')
class GoogleImagePinchZoom2018Page(ToughPinchZoomPage):
  """ Why: tough image case; top google properties """
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'google_image_pinch'
  YEAR = '2018'
  URL = 'https://www.google.com/search?q=cats&tbm=isch'
class YoutubePinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #3 (Alexa global) """
  BASE_NAME = 'youtube_pinch'
  YEAR = '2018'
  URL = 'http://www.youtube.com'

  def RunNavigateSteps(self, action_runner):
    super(YoutubePinchZoom2018Page, self).RunNavigateSteps(action_runner)
    # Wait for the player controls container before pinching.
    action_runner.WaitForElement(selector='#buttons')
class BlogSpotPinchZoom2018Page(ToughPinchZoomPage):
  """
  Why: #11 (Alexa global), google property; some blogger layouts have infinite
  scroll but more interesting
  """
  BASE_NAME = 'blogspot_pinch'
  YEAR = '2018'
  URL = 'http://googlewebmastercentral.blogspot.com/'

  def RunNavigateSteps(self, action_runner):
    super(BlogSpotPinchZoom2018Page, self).RunNavigateSteps(action_runner)
    # Wait for the search box so the layout is settled before pinching.
    action_runner.WaitForElement('div[class="searchBox"]')
class FacebookPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: top social,Public profile """
  BASE_NAME = 'facebook_pinch'
  YEAR = '2018'
  URL = 'http://www.facebook.com/barackobama'

  def RunNavigateSteps(self, action_runner):
    super(FacebookPinchZoom2018Page, self).RunNavigateSteps(action_runner)
    # Wait for the profile's 'Videos' tab to confirm the page has rendered.
    action_runner.WaitForElement(text='Videos')
class LinkedinPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #12 (Alexa global),Public profile """
  BASE_NAME = 'linkedin_pinch'
  YEAR = '2018'
  URL = 'http://www.linkedin.com/in/linustorvalds'

  def RunNavigateSteps(self, action_runner):
    # Only perform the interactive login when not replaying a WPR archive.
    if self.wpr_mode != wpr_modes.WPR_REPLAY:
      linkedin_login.LoginDesktopAccount(action_runner, 'linkedin')
    super(LinkedinPinchZoom2018Page, self).RunNavigateSteps(action_runner)
class TwitterPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #8 (Alexa global),Picked an interesting page """
  BASE_NAME = 'twitter_pinch'
  YEAR = '2018'
  URL = 'https://twitter.com/katyperry'

  def RunNavigateSteps(self, action_runner):
    super(TwitterPinchZoom2018Page, self).RunNavigateSteps(action_runner)
    # Wait for the profile navigation bar before pinching.
    action_runner.WaitForElement(selector='.ProfileNav')
class ESPNPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #1 sports """
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'espn_pinch'
  YEAR = '2018'
  URL = 'http://espn.go.com/nba'
class AccuWeatherPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #2 weather according to Alexa """
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'accu_weather_pinch'
  YEAR = '2018'
  URL = 'https://www.accuweather.com/en/us/new-york-ny/10017/weather-forecast/349727'
class TwitchPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #1 games according to Alexa """
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'twitch_pinch'
  YEAR = '2018'
  URL = 'https://www.twitch.tv'
class YahooNewsPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #1 news worldwide (Alexa global) """
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'yahoo_news_pinch'
  YEAR = '2018'
  URL = 'http://news.yahoo.com'
class CnnPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #2 news worldwide """
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'cnn_pinch'
  YEAR = '2018'
  URL = 'http://www.cnn.com'
class AmazonPinchZoom2018Page(ToughPinchZoomPage):
  """
  Why: #1 world commerce website by visits; #3 commerce in the US by
  time spent
  """
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'amazon_pinch'
  YEAR = '2018'
  URL = 'http://www.amazon.com'
class EBayPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #1 commerce website by time spent by users in US"""
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'ebay_pinch'
  YEAR = '2018'
  URL = 'http://www.ebay.com'
class BookingPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #1 Alexa recreation"""
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'booking_pinch'
  YEAR = '2018'
  URL = 'http://booking.com'
class YahooSportsPinchZoom2018Page(ToughPinchZoomPage):
  """ Why: #1 Alexa sports"""
  # No navigation override: base-class navigation and pinch gestures only.
  BASE_NAME = 'yahoo_sports_pinch'
  YEAR = '2018'
  URL = 'http://sports.yahoo.com/'
| |
# -*- test-case-name: txweb2.dav.test.test_prop.PROP.test_PROPFIND -*-
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
"""
WebDAV PROPFIND method
"""
__all__ = [
"http_PROPFIND",
"propertyName",
]
from twisted.python.failure import Failure
from twisted.internet.defer import deferredGenerator, waitForDeferred
from twext.python.log import Logger
from txweb2.http import HTTPError
from txweb2 import responsecode
from txweb2.http import StatusResponse
from txdav.xml import element as davxml
from txweb2.dav.http import MultiStatusResponse, statusForFailure, \
ErrorResponse
from txweb2.dav.util import normalizeURL, davXMLFromStream
log = Logger()
def http_PROPFIND(self, request):
    """
    Respond to a PROPFIND request. (RFC 2518, section 8.1)

    Written in the pre-inlineCallbacks deferredGenerator style: every
    Deferred is wrapped in waitForDeferred, yielded, and unwrapped with
    getResult(). The final MultiStatusResponse is yielded as the result.
    """
    if not self.exists():
        log.error("File not found: %s" % (self,))
        raise HTTPError(responsecode.NOT_FOUND)

    #
    # Check authentication and access controls
    #
    x = waitForDeferred(self.authorize(request, (davxml.Read(),)))
    yield x
    x.getResult()

    #
    # Read request body
    #
    try:
        doc = waitForDeferred(davXMLFromStream(request.stream))
        yield doc
        doc = doc.getResult()
    except ValueError as e:
        # FIX: modern "except ... as ..." syntax (PEP 3110) instead of the
        # legacy "except ValueError, e" form; valid on Python 2.6+.
        log.error("Error while handling PROPFIND body: %s" % (e,))
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, str(e)))

    if doc is None:
        # No request body means get all properties.
        search_properties = "all"
    else:
        #
        # Parse request
        #
        find = doc.root_element
        if not isinstance(find, davxml.PropertyFind):
            error = ("Non-%s element in PROPFIND request body: %s"
                     % (davxml.PropertyFind.sname(), find))
            log.error("Error: {err}", err=error)
            raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error))

        container = find.children[0]

        if isinstance(container, davxml.AllProperties):
            # Get all properties
            search_properties = "all"
        elif isinstance(container, davxml.PropertyName):
            # Get names only
            search_properties = "names"
        elif isinstance(container, davxml.PropertyContainer):
            properties = container.children
            search_properties = [(p.namespace, p.name) for p in properties]
        else:
            raise AssertionError("Unexpected element type in %s: %s"
                                 % (davxml.PropertyFind.sname(), container))

    #
    # Generate XML output stream
    #
    request_uri = request.uri
    depth = request.headers.getHeader("depth", "infinity")

    # By policy we will never allow a depth:infinity propfind
    if depth == "infinity":
        raise HTTPError(ErrorResponse(responsecode.FORBIDDEN, davxml.PropfindFiniteDepth()))

    # Look for Prefer header first, then try Brief
    prefer = request.headers.getHeader("prefer", {})
    returnMinimal = any([key == "return" and value == "minimal" for key, value, _ignore_args in prefer])
    noRoot = any([key == "depth-noroot" and value is None for key, value, _ignore_args in prefer])
    if not returnMinimal:
        returnMinimal = request.headers.getHeader("brief", False)

    xml_responses = []

    # FIXME: take advantage of the new generative properties of findChildren

    my_url = normalizeURL(request_uri)
    if self.isCollection() and not my_url.endswith("/"):
        my_url += "/"

    # Do some optimisation of access control calculation by determining any inherited ACLs outside of
    # the child resource loop and supply those to the checkPrivileges on each child.
    filtered_aces = waitForDeferred(self.inheritedACEsforChildren(request))
    yield filtered_aces
    filtered_aces = filtered_aces.getResult()

    # The Prefer: depth-noroot preference excludes the target resource itself.
    if depth in ("1", "infinity") and noRoot:
        resources = []
    else:
        resources = [(self, my_url)]

    d = self.findChildren(depth, request, lambda x, y: resources.append((x, y)), (davxml.Read(),), inherited_aces=filtered_aces)
    x = waitForDeferred(d)
    yield x
    x.getResult()

    for resource, uri in resources:
        # BUGFIX: compare strings with ==, not "is" — the original relied on
        # CPython small-string interning for identity to hold.
        if search_properties == "names":
            try:
                resource_properties = waitForDeferred(resource.listProperties(request))
                yield resource_properties
                resource_properties = resource_properties.getResult()
            except:
                log.error("Unable to get properties for resource %r" % (resource,))
                raise

            properties_by_status = {
                responsecode.OK: [propertyName(p) for p in resource_properties]
            }
        else:
            properties_by_status = {
                responsecode.OK: [],
                responsecode.NOT_FOUND: [],
            }

            if search_properties == "all":
                properties_to_enumerate = waitForDeferred(resource.listAllprop(request))
                yield properties_to_enumerate
                properties_to_enumerate = properties_to_enumerate.getResult()
            else:
                properties_to_enumerate = search_properties

            # "prop" rather than "property" to avoid shadowing the builtin.
            for prop in properties_to_enumerate:
                has = waitForDeferred(resource.hasProperty(prop, request))
                yield has
                has = has.getResult()
                if has:
                    try:
                        resource_property = waitForDeferred(resource.readProperty(prop, request))
                        yield resource_property
                        resource_property = resource_property.getResult()
                    except:
                        f = Failure()

                        status = statusForFailure(f, "getting property: %s" % (prop,))
                        if status not in properties_by_status:
                            properties_by_status[status] = []
                        if not returnMinimal or status != responsecode.NOT_FOUND:
                            properties_by_status[status].append(propertyName(prop))
                    else:
                        if resource_property is not None:
                            properties_by_status[responsecode.OK].append(resource_property)
                        elif not returnMinimal:
                            properties_by_status[responsecode.NOT_FOUND].append(propertyName(prop))
                elif not returnMinimal:
                    properties_by_status[responsecode.NOT_FOUND].append(propertyName(prop))

        propstats = []

        for status in properties_by_status:
            properties = properties_by_status[status]
            if not properties:
                continue

            xml_status = davxml.Status.fromResponseCode(status)
            xml_container = davxml.PropertyContainer(*properties)
            xml_propstat = davxml.PropertyStatus(xml_container, xml_status)

            propstats.append(xml_propstat)

        # Always need to have at least one propstat present (required by Prefer header behavior)
        if len(propstats) == 0:
            propstats.append(davxml.PropertyStatus(
                davxml.PropertyContainer(),
                davxml.Status.fromResponseCode(responsecode.OK)
            ))

        xml_resource = davxml.HRef(uri)
        xml_response = davxml.PropertyStatusResponse(xml_resource, *propstats)

        xml_responses.append(xml_response)

    #
    # Return response
    #
    yield MultiStatusResponse(xml_responses)

http_PROPFIND = deferredGenerator(http_PROPFIND)
##
# Utilities
##
def propertyName(name):
    """Wrap a ``(namespace, name)`` pair in a ``WebDAVUnknownElement``.

    Used to report property names (without values) in PROPFIND responses.
    """
    namespace, local_name = name
    element = davxml.WebDAVUnknownElement()
    element.namespace = namespace
    element.name = local_name
    return element
| |
import io
import uuid
from mitmproxy.net import websockets
from mitmproxy.test import tutils
from mitmproxy import tcp
from mitmproxy import websocket
from mitmproxy import controller
from mitmproxy import http
from mitmproxy import connections
from mitmproxy import flow
from mitmproxy.net import http as net_http
def ttcpflow(client_conn=True, server_conn=True, messages=True, err=None):
    """Build a TCPFlow test fixture.

    Arguments passed as ``True`` are replaced with freshly built defaults;
    any other value (including ``None``) is used verbatim.
    """
    client_conn = tclient_conn() if client_conn is True else client_conn
    server_conn = tserver_conn() if server_conn is True else server_conn
    if messages is True:
        messages = [
            tcp.TCPMessage(True, b"hello"),
            tcp.TCPMessage(False, b"it's me"),
        ]
    err = terr() if err is True else err

    tcp_flow = tcp.TCPFlow(client_conn, server_conn)
    tcp_flow.messages = messages
    tcp_flow.error = err
    tcp_flow.reply = controller.DummyReply()
    return tcp_flow
def twebsocketflow(client_conn=True, server_conn=True, messages=True, err=None, handshake_flow=True):
    """Build a WebSocketFlow test fixture.

    Arguments passed as ``True`` are replaced with freshly built defaults.
    When ``handshake_flow`` is ``True``, a synthetic HTTP 101 upgrade
    request/response pair is fabricated and cross-linked to the websocket
    flow via metadata ids.
    """
    if client_conn is True:
        client_conn = tclient_conn()
    if server_conn is True:
        server_conn = tserver_conn()
    if handshake_flow is True:
        # Minimal GET /ws upgrade request with fixed epoch timestamps so the
        # fixture is deterministic.
        req = http.HTTPRequest(
            "example.com",
            80,
            b"GET",
            b"http",
            b"example.com",
            b"/ws",
            b"HTTP/1.1",
            headers=net_http.Headers(
                connection="upgrade",
                upgrade="websocket",
                sec_websocket_version="13",
                sec_websocket_key="1234",
            ),
            content=b'',
            trailers=None,
            timestamp_start=946681200,
            timestamp_end=946681201,
        )
        # Matching 101 Switching Protocols response.
        resp = http.HTTPResponse(
            b"HTTP/1.1",
            101,
            reason=net_http.status_codes.RESPONSES.get(101),
            headers=net_http.Headers(
                connection='upgrade',
                upgrade='websocket',
                sec_websocket_accept=b'',
            ),
            content=b'',
            trailers=None,
            timestamp_start=946681202,
            timestamp_end=946681203,
        )
        handshake_flow = http.HTTPFlow(client_conn, server_conn)
        handshake_flow.request = req
        handshake_flow.response = resp

    f = websocket.WebSocketFlow(client_conn, server_conn, handshake_flow)
    # Link the websocket flow and its handshake flow in both directions.
    f.metadata['websocket_handshake'] = handshake_flow.id
    handshake_flow.metadata['websocket_flow'] = f.id
    handshake_flow.metadata['websocket'] = True
    if messages is True:
        messages = [
            websocket.WebSocketMessage(websockets.OPCODE.BINARY, True, b"hello binary"),
            websocket.WebSocketMessage(websockets.OPCODE.TEXT, True, "hello text".encode()),
            websocket.WebSocketMessage(websockets.OPCODE.TEXT, False, "it's me".encode()),
        ]
    if err is True:
        err = terr()
    f.messages = messages
    f.error = err
    f.reply = controller.DummyReply()
    return f
def tflow(client_conn=True, server_conn=True, req=True, resp=None, err=None):
    """Build an HTTPFlow test fixture.

    Arguments passed as ``True`` are replaced with freshly built defaults;
    any other value (including ``None``) is used verbatim.

    @type client_conn: bool | None | mitmproxy.proxy.connection.ClientConnection
    @type server_conn: bool | None | mitmproxy.proxy.connection.ServerConnection
    @type req: bool | None | mitmproxy.proxy.protocol.http.HTTPRequest
    @type resp: bool | None | mitmproxy.proxy.protocol.http.HTTPResponse
    @type err: bool | None | mitmproxy.proxy.protocol.primitives.Error
    @return: mitmproxy.proxy.protocol.http.HTTPFlow
    """
    client_conn = tclient_conn() if client_conn is True else client_conn
    server_conn = tserver_conn() if server_conn is True else server_conn
    req = tutils.treq() if req is True else req
    resp = tutils.tresp() if resp is True else resp
    err = terr() if err is True else err

    http_flow = http.HTTPFlow(client_conn, server_conn)
    http_flow.request = req
    http_flow.response = resp
    http_flow.error = err
    http_flow.reply = controller.DummyReply()
    return http_flow
class DummyFlow(flow.Flow):
    """A flow that is neither HTTP nor TCP."""

    def __init__(self, client_conn, server_conn, live=None):
        # "dummy" is the flow-type tag recorded in the flow's state.
        super().__init__("dummy", client_conn, server_conn, live)
def tdummyflow(client_conn=True, server_conn=True, err=None):
    """Build a DummyFlow (neither HTTP nor TCP) test fixture.

    Arguments passed as ``True`` are replaced with freshly built defaults.
    """
    client_conn = tclient_conn() if client_conn is True else client_conn
    server_conn = tserver_conn() if server_conn is True else server_conn
    err = terr() if err is True else err

    dummy = DummyFlow(client_conn, server_conn)
    dummy.error = err
    dummy.reply = controller.DummyReply()
    return dummy
def tclient_conn():
    """
    Build a deterministic ClientConnection fixture.

    Uses fixed epoch timestamps (around 2000-01-01) and a fresh random id so
    repeated calls differ only in the id.

    @return: mitmproxy.proxy.connection.ClientConnection
    """
    c = connections.ClientConnection.from_state(dict(
        id=str(uuid.uuid4()),
        address=("127.0.0.1", 22),
        clientcert=None,
        mitmcert=None,
        tls_established=False,
        timestamp_start=946681200,
        timestamp_tls_setup=946681201,
        timestamp_end=946681206,
        sni="address",
        cipher_name="cipher",
        alpn_proto_negotiated=b"http/1.1",
        tls_version="TLSv1.2",
        tls_extensions=[(0x00, bytes.fromhex("000e00000b6578616d"))],
    ))
    c.reply = controller.DummyReply()
    # In-memory read/write files so tests never touch a real socket.
    c.rfile = io.BytesIO()
    c.wfile = io.BytesIO()
    return c
def tserver_conn():
    """
    Build a deterministic ServerConnection fixture.

    Uses fixed epoch timestamps (around 2000-01-01) and a fresh random id so
    repeated calls differ only in the id.

    @return: mitmproxy.proxy.connection.ServerConnection
    """
    c = connections.ServerConnection.from_state(dict(
        id=str(uuid.uuid4()),
        address=("address", 22),
        source_address=("address", 22),
        ip_address=("192.168.0.1", 22),
        cert=None,
        timestamp_start=946681202,
        timestamp_tcp_setup=946681203,
        timestamp_tls_setup=946681204,
        timestamp_end=946681205,
        tls_established=False,
        sni="address",
        alpn_proto_negotiated=None,
        tls_version="TLSv1.2",
        via=None,
    ))
    c.reply = controller.DummyReply()
    # In-memory read/write files so tests never touch a real socket.
    c.rfile = io.BytesIO()
    c.wfile = io.BytesIO()
    return c
def terr(content="error"):
    """Build a flow Error fixture carrying *content* as its message.

    @return: mitmproxy.proxy.protocol.primitives.Error
    """
    return flow.Error(content)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Saves and restore variables inside traced @tf.functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.saving import saveable_hook
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util import nest
class _SingleDeviceSaver(object):
  """Saves and restores checkpoints from the current device."""

  __slots__ = ["_saveable_objects"]

  def __init__(self, saveable_objects):
    """Specify a list of `SaveableObject`s to save and restore.

    Args:
      saveable_objects: A list of `SaveableObject`s.

    Raises:
      ValueError: If any entry is not a `SaveableObject`.
    """
    saveable_objects = list(saveable_objects)
    for saveable in saveable_objects:
      if not isinstance(saveable, saveable_object.SaveableObject):
        raise ValueError(
            "Expected a list of SaveableObjects, got %s." % (saveable,))
    self._saveable_objects = saveable_objects

  def save(self, file_prefix, options=None):
    """Save the saveable objects to a checkpoint with `file_prefix`.

    Args:
      file_prefix: A string or scalar string Tensor containing the prefix to
        save under.
      options: Optional `CheckpointOptions` object.

    Returns:
      An `Operation`, or None when executing eagerly.
    """
    options = options or checkpoint_options.CheckpointOptions()
    # Flatten all saveables' specs into parallel name/slice/tensor lists for
    # a single save_v2 op.
    tensor_names = []
    tensors = []
    tensor_slices = []
    for saveable in self._saveable_objects:
      for spec in saveable.specs:
        tensor = spec.tensor
        # A tensor value of `None` indicates that this SaveableObject gets
        # recorded in the object graph, but that no value is saved in the
        # checkpoint.
        if tensor is not None:
          tensor_names.append(spec.name)
          tensors.append(tensor)
          tensor_slices.append(spec.slice_spec)
    # Honor an explicit I/O device if configured; otherwise pin the save op
    # to the CPU.
    save_device = options.experimental_io_device or "cpu:0"
    with ops.device(save_device):
      return io_ops.save_v2(file_prefix, tensor_names, tensor_slices, tensors)

  def restore(self, file_prefix, options=None):
    """Restore the saveable objects from a checkpoint with `file_prefix`.

    Args:
      file_prefix: A string or scalar string Tensor containing the prefix for
        files to read from.
      options: Optional `CheckpointOptions` object.

    Returns:
      A dictionary mapping from SaveableObject names to restore operations.
    """
    options = options or checkpoint_options.CheckpointOptions()
    # Flatten specs into one restore_v2 call, remembering each saveable's
    # spec names so the flat result can be re-grouped per saveable.
    restore_specs = []
    tensor_structure = []
    for saveable in self._saveable_objects:
      saveable_tensor_structure = []
      tensor_structure.append(saveable_tensor_structure)
      for spec in saveable.specs:
        saveable_tensor_structure.append(spec.name)
        restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
    tensor_names, tensor_slices, tensor_dtypes = zip(*restore_specs)
    restore_device = options.experimental_io_device or "cpu:0"
    with ops.device(restore_device):
      restored_tensors = io_ops.restore_v2(
          file_prefix, tensor_names, tensor_slices, tensor_dtypes)
    # Re-nest the flat tensor list to match each saveable's spec layout.
    structured_restored_tensors = nest.pack_sequence_as(
        tensor_structure, restored_tensors)
    restore_ops = {}
    for saveable, restored_tensors in zip(self._saveable_objects,
                                          structured_restored_tensors):
      restore_ops[saveable.name] = saveable.restore(
          restored_tensors, restored_shapes=None)
    return restore_ops
def sharded_filename(filename_tensor, shard, num_shards):
  """Append sharding information to a filename.

  Thin wrapper over the `sharded_filename` op.

  Args:
    filename_tensor: A string tensor.
    shard: Integer. The shard for the filename.
    num_shards: An int Tensor for the number of shards.

  Returns:
    A string tensor.
  """
  return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
class MultiDeviceSaver(object):
"""Saves checkpoints directly from multiple devices.
Note that this is a low-level utility which stores Tensors in the keys
specified by `SaveableObject`s. Higher-level utilities for object-based
checkpointing are built on top of it.
"""
  def __init__(self, saveable_objects):
    """Specify a list of `SaveableObject`s to save and restore.

    Args:
      saveable_objects: A list of `SaveableObject`s.
        Objects extending `SaveableObject` will be saved and restored, and
        objects extending `SaveableHook` will be called into at save and
        restore time.

    Raises:
      ValueError: If an entry is neither a `SaveableObject` nor a
        `SaveableHook`.
    """
    self._before_save_callbacks = []
    self._after_restore_callbacks = []

    saveable_objects = list(saveable_objects)
    # Group saveables by the CPU device of their host so each group can be
    # handled by one _SingleDeviceSaver.
    saveables_by_device = {}
    for saveable in saveable_objects:
      is_saveable = isinstance(saveable, saveable_object.SaveableObject)
      is_hook = isinstance(saveable, saveable_hook.SaveableHook)

      if not is_saveable and not is_hook:
        raise ValueError(
            "Expected a dictionary of SaveableObjects, got {}."
            .format(saveable))

      if is_hook:
        self._before_save_callbacks.append(saveable.before_save)
        self._after_restore_callbacks.append(saveable.after_restore)

      # NOTE: an object may be both saveable and hook; it is then registered
      # in both roles.
      if is_saveable:
        host_device = saveable_object_util.set_cpu0(saveable.device)
        saveables_by_device.setdefault(host_device, []).append(saveable)

    self._single_device_savers = {
        device: _SingleDeviceSaver(saveables)
        for device, saveables in saveables_by_device.items()}
def to_proto(self):
"""Serializes to a SaverDef referencing the current graph."""
filename_tensor = array_ops.placeholder(
shape=[], dtype=dtypes.string, name="saver_filename")
save_tensor = self._traced_save(filename_tensor)
restore_op = self._traced_restore(filename_tensor).op
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.name,
save_tensor_name=save_tensor.name,
restore_op_name=restore_op.name,
version=saver_pb2.SaverDef.V2)
@def_function.function(
input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
autograph=False)
def _traced_save(self, file_prefix):
save_op = self.save(file_prefix)
with ops.device("cpu:0"):
with ops.control_dependencies([save_op]):
return array_ops.identity(file_prefix)
@def_function.function(
input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
autograph=False)
def _traced_restore(self, file_prefix):
restore_ops = self.restore(file_prefix)
with ops.device("cpu:0"):
with ops.control_dependencies(restore_ops.values()):
return array_ops.identity(file_prefix)
def save(self, file_prefix, options=None):
"""Save the saveable objects to a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix to
save under.
options: Optional `CheckpointOptions` object.
Returns:
An `Operation`, or None when executing eagerly.
"""
options = options or checkpoint_options.CheckpointOptions()
for callback in self._before_save_callbacks:
callback()
# IMPLEMENTATION DETAILS: most clients should skip.
#
# Suffix for any well-formed "checkpoint_prefix", when sharded.
# Transformations:
# * Users pass in "save_path" in save() and restore(). Say "myckpt".
# * checkpoint_prefix gets fed <save_path><sharded_suffix>.
#
# Example:
# During runtime, a temporary directory is first created, which contains
# files
#
# <train dir>/myckpt_temp/
# part-?????-of-?????{.index, .data-00000-of-00001}
#
# Before .save() finishes, they will be (hopefully, atomically) renamed to
#
# <train dir>/
# myckpt{.index, .data-?????-of-?????}
#
# Filesystems with eventual consistency (such as S3), don't need a
# temporary location. Using a temporary directory in those cases might
# cause situations where files are not available during copy.
#
# Users only need to interact with the user-specified prefix, which is
# "<train dir>/myckpt" in this case. Save() and Restore() work with the
# prefix directly, instead of any physical pathname. (On failure and
# subsequent restore, an outdated and orphaned temporary directory can be
# safely removed.)
with ops.device("CPU"):
sharded_suffix = array_ops.where(
string_ops.regex_full_match(file_prefix, "^s3://.*"),
constant_op.constant(".part"),
constant_op.constant("_temp/part"))
tmp_checkpoint_prefix = string_ops.string_join(
[file_prefix, sharded_suffix])
def save_fn():
num_shards = len(self._single_device_savers)
sharded_saves = []
sharded_prefixes = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
last_device = None
for shard, (device, saver) in enumerate(
sorted(self._single_device_savers.items())):
last_device = device
with ops.device(saveable_object_util.set_cpu0(device)):
shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,
num_shards_tensor)
sharded_prefixes.append(shard_prefix)
with ops.device(device):
# _SingleDeviceSaver will use the CPU device when necessary, but
# initial read operations should be placed on the SaveableObject's
# device.
sharded_saves.append(saver.save(shard_prefix, options))
with ops.control_dependencies(sharded_saves):
# Merge on the io_device if specified, otherwise co-locates the merge op
# with the last device used.
merge_device = (
options.experimental_io_device or
saveable_object_util.set_cpu0(last_device))
with ops.device(merge_device):
# V2 format write path consists of a metadata merge step. Once
# merged, attempts to delete the temporary directory,
# "<user-fed prefix>_temp".
return gen_io_ops.merge_v2_checkpoints(
sharded_prefixes, file_prefix, delete_old_dirs=True)
# Since this will causes a function re-trace on each save, limit this to the
# cases where it is needed: eager and when there are multiple tasks/single
# device savers. Note that the retrace is needed to ensure we pickup the
# latest values of options like experimental_io_device.
if context.executing_eagerly() and len(self._single_device_savers) > 1:
# Explicitly place the identity op on the first device.
@def_function.function(jit_compile=False)
def tf_function_save():
save_fn()
tf_function_save()
else:
return save_fn()
def restore(self, file_prefix, options=None):
"""Restore the saveable objects from a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix for
files to read from.
options: Optional `CheckpointOptions` object.
Returns:
When not run eagerly or when saving on a single device, returns a
dictionary mapping from SaveableObject names to restore operations;
otherwise, returns an empty dict.
"""
options = options or checkpoint_options.CheckpointOptions()
def restore_fn():
restore_ops = {}
# Sort by device name to avoid propagating non-deterministic dictionary
# ordering in some Python versions.
for device, saver in sorted(self._single_device_savers.items()):
with ops.device(device):
restore_ops.update(saver.restore(file_prefix, options))
return restore_ops
# Since this will causes a function re-trace on each restore, limit this to
# cases where it is needed: eager and when there are multiple tasks/single
# device savers. Note that the retrace is needed to ensure we pickup the
# latest values of options like experimental_io_device.
if context.executing_eagerly() and len(self._single_device_savers) > 1:
@def_function.function(jit_compile=False)
def tf_function_restore():
restore_fn()
return {}
restore_ops = tf_function_restore()
else:
restore_ops = restore_fn()
for callback in self._after_restore_callbacks:
callback()
return restore_ops
| |
import numpy as np
import sklearn
from sklearn.metrics import roc_curve, auc
import sklearn.metrics
import sklearn.cross_validation
import copy
import util
import time
import metrics as ranking_metrics
import models.regression
import models.ensembles
import models.DNN
import models.baselines
import multiprocessing
def fill_in_truth_and_predictions(truth, predictions, fold, y_all, y_pred, learn_options, test):
    """Append this fold's held-out ground truth and predictions to the accumulators.

    Args:
        truth: dict fold -> {'ranks', 'thrs', 'raw'} of 1-d numpy arrays.
        predictions: dict fold -> 1-d numpy array of model outputs.
        fold: key identifying the current fold (e.g. a gene name).
        y_all: pandas DataFrame holding the target columns named in learn_options.
        y_pred: numpy array of predictions for the test rows.
        learn_options: dict with 'rank-transformed target name',
            'binary target name' and optionally 'raw target name' keys.
        test: index array selecting this fold's test rows in y_all.

    Returns:
        (truth, predictions), mutated in place and returned for convenience.
    """
    truth[fold]['ranks'] = np.hstack((truth[fold]['ranks'],
                                      y_all[learn_options['rank-transformed target name']].values[test].flatten()))
    truth[fold]['thrs'] = np.hstack((truth[fold]['thrs'],
                                     y_all[learn_options['binary target name']].values[test].flatten()))
    # BUG FIX: the original checked for the key 'raw_target_name' but then read
    # 'raw target name' (the spelling used by every other option), so the raw
    # truth was silently never recorded. Check the key that is actually read.
    if 'raw target name' in learn_options:
        truth[fold]['raw'] = np.hstack((truth[fold]['raw'],
                                        y_all[learn_options['raw target name']].values[test].flatten()))
    predictions[fold] = np.hstack((predictions[fold], y_pred.flatten()))
    return truth, predictions
def construct_filename(learn_options, TEST):
    """Build an output filename that encodes the main learn_options settings.

    The name concatenates: data version ("V<n>" or "offV1", replaced entirely
    by "TEST." when TEST is true), method, feature order, target name, the
    penalty and CV scheme for linreg, and a training-metric suffix.

    Args:
        learn_options: dict of experiment options ('method', 'order',
            'target_name', 'cv', 'training_metric', optionally 'V',
            'penalty', 'NDGC_k').
        TEST: bool; when true the version prefix is replaced by "TEST.".

    Returns:
        The constructed filename string (also printed for logging).
    """
    # dict.has_key() was removed in Python 3; use the `in` operator instead.
    if "V" in learn_options:
        filename = "V%s" % learn_options["V"]
    else:
        filename = "offV1"
    # NOTE: TEST deliberately *replaces* the version prefix, matching the
    # original behavior.
    if TEST:
        filename = "TEST."
    filename += learn_options["method"]
    filename += '.order%d' % learn_options["order"]
    filename += learn_options["target_name"]
    if learn_options["method"] == "GPy":
        # GPy-specific name components were retired; nothing is appended.
        pass
    elif learn_options["method"] == "linreg":
        filename += "." + learn_options["penalty"]
    filename += "." + learn_options["cv"]
    if learn_options["training_metric"] == "NDCG":
        filename += ".NDGC_%d" % learn_options["NDGC_k"]
    elif learn_options["training_metric"] == "AUC":
        filename += ".AUC"
    elif learn_options["training_metric"] == 'spearmanr':
        filename += ".spearman"
    # Parenthesized form works under both Python 2 and 3.
    print("filename = %s" % filename)
    return filename
def print_summary(global_metric, results, learn_options, feature_sets, flags):
    """Print a human-readable summary of a cross-validation run.

    Args:
        global_metric: scalar value of learn_options['metric'] over all folds.
        results: indexable results bundle; results[0] holds per-fold metric
            values and results[4] the total feature count.
        learn_options: dict of experiment options.
        feature_sets: dict of feature-set name -> DataFrame (names printed).
        flags: dict whose keys name learn_options entries to echo.
    """
    print("\nSummary:")
    print(learn_options)
    print("\t\tglobal %s=%.2f" % (learn_options['metric'], global_metric))
    print("\t\tmedian %s across folds=%.2f" % (learn_options['metric'], np.median(results[0])))
    print("\t\torder=%d" % learn_options["order"])
    # BUG FIX: the original line built the kerntype string but never printed it
    # (the `print` keyword was missing, making it a no-op expression).
    if 'kerntype' in learn_options:
        print("\t\tkern type = %s" % learn_options['kerntype'])
    if 'degree' in learn_options:
        print("\t\tdegree=%d" % learn_options['degree'])
    print("\t\ttarget_name=%s" % learn_options["target_name"])
    for k in flags.keys():
        print('\t\t' + k + '=' + str(learn_options[k]))
    print("\t\tfeature set:")
    # Renamed from `set` to avoid shadowing the builtin.
    for set_name in feature_sets.keys():
        print("\t\t\t%s" % set_name)
    print("\t\ttotal # features=%d" % results[4])
def extract_fpr_tpr_for_fold(aucs, fold, i, predictions, truth, y_binary, test, y_pred):
    """Compute the ROC AUC for one fold's held-out predictions and append it to `aucs`."""
    assert len(np.unique(y_binary))<=2, "if using AUC need binary targets"
    false_pos_rate, true_pos_rate, _ = roc_curve(y_binary[test], y_pred)
    fold_auc = auc(false_pos_rate, true_pos_rate)
    aucs.append(fold_auc)
def extract_NDCG_for_fold(metrics, fold, i, predictions, truth, y_ground_truth, test, y_pred, learn_options):
    """Append this fold's NDCG@k (ties-aware) to `metrics`."""
    truth_flat = y_ground_truth[test].flatten()
    pred_flat = y_pred.flatten()
    metrics.append(
        ranking_metrics.ndcg_at_k_ties(truth_flat, pred_flat, learn_options["NDGC_k"]))
def extract_spearman_for_fold(metrics, fold, i, predictions, truth, y_ground_truth, test, y_pred, learn_options):
    """Append this fold's Spearman correlation (NaN-safe helper) to `metrics`."""
    rho = util.spearmanr_nonan(y_ground_truth[test].flatten(), y_pred.flatten())[0]
    assert not np.isnan(rho), "found nan spearman"
    metrics.append(rho)
def get_train_test(test_gene, y_all, train_genes=None):
    """Return a (train, test) split that holds out `test_gene`.

    The split is convoluted on purpose: train_genes + test genes may not add
    up to all genes (e.g. when loading V3 but using only V2).

    Args:
        test_gene: gene name to hold out.
        y_all: DataFrame indexed by 'Target gene' (or 'MutatedSequence' for
            off-target data) with a 'test' indicator column.
        train_genes: optional iterable restricting the training genes.

    Returns:
        (train, test) integer index arrays; for off-target data, a boolean
        train mask and test=None.
    """
    # Off-target data is keyed by 'MutatedSequence' instead of 'Target gene'.
    if 'MutatedSequence' in y_all.index.names:
        mutated = y_all.index.get_level_values('MutatedSequence').values
        return mutated != test_gene, None

    gene_labels = y_all.index.get_level_values('Target gene').values
    not_test = gene_labels != test_gene
    if train_genes is None:
        train_mask = not_test
    else:
        in_train_genes = np.zeros(not_test.shape, dtype=bool)
        for t_gene in train_genes:
            in_train_genes = np.logical_or(in_train_genes, gene_labels == t_gene)
        train_mask = np.logical_and(not_test, in_train_genes)

    # The 'test' column has to do with extra pairs in V2.
    test_mask = (gene_labels == test_gene) * (y_all['test'].values == 1.)

    # Convert the boolean masks to integer indices.
    return np.where(train_mask)[0], np.where(test_mask)[0]
def cross_validate(y_all, feature_sets, learn_options=None, TEST=False, train_genes=None, CV=True):
    '''
    Run cross-validation over gene (or stratified) folds, dispatching each fold
    to the model named by learn_options["method"], optionally in parallel.

    feature_sets is a dictionary of "set name" to pandas.DataFrame
    one set might be single-nucleotide, position-independent features of order X, for e.g.
    Method: "GPy" or "linreg"
    Metric: NDCG (learning to rank metric, Normalized Discounted Cumulative Gain); AUC
    Output: cv_score_median, gene_rocs
    When CV=False, it trains on everything (and tests on everything, just to fit the code)

    Returns:
        metrics: list of per-fold training-metric values
        gene_pred: [(truth, predictions)] accumulated per fold label
        fold_labels: names of the folds (gene names or "fold<i>")
        m: dict fold index -> fitted model returned by the per-fold function
        dimsum: total feature dimensionality (from concatenate_feature_sets)
        filename: output filename encoding the options
        feature_names: names of the concatenated features
    '''
    print "range of y_all is [%f, %f]" % (np.min(y_all[learn_options['target_name']].values), np.max(y_all[learn_options['target_name']].values))
    allowed_methods = ["GPy", "linreg", "AdaBoostRegressor", "AdaBoostClassifier",
                       "DecisionTreeRegressor", "RandomForestRegressor",
                       "ARDRegression", "GPy_fs", "mean", "random", "DNN",
                       "lasso_ensemble", "doench", "logregL1", "sgrna_from_doench", 'SVC', 'xu_et_al']
    assert learn_options["method"] in allowed_methods,"invalid method: %s" % learn_options["method"]
    assert learn_options["method"] == "linreg" and learn_options['penalty'] == 'L2' or learn_options["weighted"] is None, "weighted only works with linreg L2 right now"
    # construct filename from options
    filename = construct_filename(learn_options, TEST)
    print "Cross-validating genes..."
    t2 = time.time()
    # target vector as an (n, 1) float64 column
    y = np.array(y_all[learn_options["target_name"]].values[:,None],dtype=np.float64)
    # concatenate feature sets in to one nparray, and get dimension of each
    inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)
    if not CV:
        assert learn_options['cv'] == 'gene', 'Must use gene-CV when CV is False (I need to use all of the genes and stratified complicates that)'
    # set-up for cross-validation
    ## for outer loop, the one Doench et al use genes for
    if learn_options["cv"] == "stratified":
        assert not learn_options.has_key("extra_pairs") or learn_options['extra pairs'], "can't use extra pairs with stratified CV, need to figure out how to properly account for genes affected by two drugs"
        label_encoder = sklearn.preprocessing.LabelEncoder()
        label_encoder.fit(y_all['Target gene'].values)
        gene_classes = label_encoder.transform(y_all['Target gene'].values)
        if 'n_folds' in learn_options.keys():
            n_folds = learn_options['n_folds']
        elif learn_options['train_genes'] is not None and learn_options["test_genes"] is not None:
            n_folds = len(learn_options["test_genes"])
        else:
            n_folds = len(learn_options['all_genes'])
        cv = sklearn.cross_validation.StratifiedKFold(gene_classes, n_folds=n_folds, shuffle=True)
        fold_labels = ["fold%d" % i for i in range(1,n_folds+1)]
        if learn_options['num_genes_remove_train'] is not None:
            raise NotImplementedException()
    elif learn_options["cv"]=="gene":
        cv = []
        if not CV:
            train_test_tmp = get_train_test('dummy', y_all) # get train, test split using a dummy gene
            train_tmp, test_tmp = train_test_tmp
            # not a typo, using training set to test on as well, just for this case. Test set is not used
            # for internal cross-val, etc. anyway.
            train_test_tmp = (train_tmp, train_tmp)
            cv.append(train_test_tmp)
            fold_labels = learn_options['all_genes']
        elif learn_options['train_genes'] is not None and learn_options["test_genes"] is not None:
            assert learn_options['train_genes'] is not None and learn_options['test_genes'] is not None, "use both or neither"
            for i, gene in enumerate(learn_options['test_genes']):
                cv.append(get_train_test(gene, y_all, learn_options['train_genes']))
            fold_labels = learn_options["test_genes"]
            # if train and test genes are seperate, there should be only one fold
            train_test_disjoint = set.isdisjoint(set(learn_options["train_genes"].tolist()), set(learn_options["test_genes"].tolist()))
        else:
            # leave-one-gene-out over all genes
            for i, gene in enumerate(learn_options['all_genes']):
                train_test_tmp = get_train_test(gene, y_all)
                cv.append(train_test_tmp)
            fold_labels = learn_options['all_genes']
        if learn_options['num_genes_remove_train'] is not None:
            # Randomly drop num_genes_remove_train genes from each fold's
            # training set (used for learning-curve experiments).
            for i, (train,test) in enumerate(cv):
                unique_genes = np.random.permutation(np.unique(np.unique(y_all['Target gene'][train])))
                genes_to_keep = unique_genes[0:len(unique_genes) - learn_options['num_genes_remove_train']]
                guides_to_keep = []
                filtered_train = []
                for j, gene in enumerate(y_all['Target gene']):
                    if j in train and gene in genes_to_keep:
                        filtered_train.append(j)
                cv_i_orig = copy.deepcopy(cv[i])
                cv[i] = (filtered_train, test)
                if learn_options['num_genes_remove_train']==0:
                    # sanity check: removing zero genes must leave the fold unchanged
                    assert np.all(cv_i_orig[0]==cv[i][0])
                    assert np.all(cv_i_orig[1]==cv[i][1])
                print "# train/train after/before is %s, %s" % (len(cv[i][0]), len(cv_i_orig[0]))
                print "# test/test after/before is %s, %s" % (len(cv[i][1]), len(cv_i_orig[1]))
    else:
        raise Exception("invalid cv options given: %s" % learn_options["cv"])
    cv = [c for c in cv] #make list from generator, so can subset for TEST case
    if TEST:
        # quick smoke-test mode: keep only the first fold
        ind_to_use = [0]#[0,1]
        cv = [cv[i] for i in ind_to_use]
        fold_labels = [fold_labels[i] for i in ind_to_use]
    # per-fold accumulators for ground truth and predictions
    truth = dict([(t, dict([(m, np.array([])) for m in ['raw', 'ranks', 'thrs']])) for t in fold_labels])
    predictions = dict([(t, np.array([])) for t in fold_labels])
    m = {}
    metrics = []
    #do the cross-validation
    num_proc = learn_options["num_proc"]
    if num_proc > 1:
        # parallel version: one worker process per fold (capped at len(cv))
        num_proc = np.min([num_proc,len(cv)])
        print "using multiprocessing with %d procs--one for each fold" % num_proc
        jobs = []
        pool = multiprocessing.Pool(processes=num_proc)
        for i,fold in enumerate(cv):
            train,test = fold
            print "working on fold %d of %d, with %d train and %d test" % (i, len(cv), len(train), len(test))
            if learn_options["method"]=="GPy":
                job = pool.apply_async(models.GP.gp_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="linreg":
                job = pool.apply_async(models.regression.linreg_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="logregL1":
                job = pool.apply_async(models.regression.logreg_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="AdaBoostRegressor":
                print "adaboostreg"
                job = pool.apply_async(models.ensembles.adaboost_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, False))
            elif learn_options["method"]=="AdaBoostClassifier":
                job = pool.apply_async(models.ensembles.adaboost_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, True))
            elif learn_options["method"]=="DecisionTreeRegressor":
                job = pool.apply_async(models.ensembles.decisiontree_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="RandomForestRegressor":
                job = pool.apply_async(models.ensembles.randomforest_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="ARDRegression":
                job = pool.apply_async(models.regression.ARDRegression_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "random":
                job = pool.apply_async(models.baselines.random_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "mean":
                job = pool.apply_async(models.baselines.mean_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "SVC":
                job = pool.apply_async(models.baselines.SVC_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "DNN":
                job = pool.apply_async(models.DNN.DNN_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "lasso_ensemble":
                job = pool.apply_async(models.ensembles.LASSOs_ensemble_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "doench":
                job = pool.apply_async(models.baselines.doench_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "sgrna_from_doench":
                job = pool.apply_async(models.baselines.sgrna_from_doench_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "xu_et_al":
                job = pool.apply_async(models.baselines.xu_et_al_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            else:
                raise Exception("did not find method=%s" % learn_options["method"])
            jobs.append(job)
        pool.close()
        pool.join()
        # collect the per-fold results and score them with the training metric
        for i,fold in enumerate(cv):#i in range(0,len(jobs)):
            y_pred, m[i] = jobs[i].get()
            train,test = fold
            if learn_options["training_metric"]=="AUC":
                extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred)
            elif learn_options["training_metric"]=="NDCG":
                extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            elif learn_options["training_metric"] == 'spearmanr':
                extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            else:
                raise Exception("invalid 'training_metric' in learn_options: %s" % learn_options["training_metric"])
            truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i], y_all, y_pred, learn_options, test)
        pool.terminate()
    else:
        # non parallel version
        for i,fold in enumerate(cv):
            train,test = fold
            if learn_options["method"]=="GPy":
                y_pred, m[i] = gp_on_fold(models.GP.feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="linreg":
                y_pred, m[i] = models.regression.linreg_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="logregL1":
                y_pred, m[i] = models.regression.logreg_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="AdaBoostRegressor":
                y_pred, m[i] = models.ensembles.adaboost_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, classification=False)
            elif learn_options["method"]=="AdaBoostClassifier":
                y_pred, m[i] = models.ensembles.adaboost_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, classification=True)
            elif learn_options["method"]=="DecisionTreeRegressor":
                y_pred, m[i] = models.ensembles.decisiontree_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="RandomForestRegressor":
                y_pred, m[i] = models.ensembles.randomforest_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="ARDRegression":
                y_pred, m[i] = models.regression.ARDRegression_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="GPy_fs":
                y_pred, m[i] = models.GP.gp_with_fs_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "random":
                y_pred, m[i] = models.baselines.random_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "mean":
                y_pred, m[i] = models.baselines.mean_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "SVC":
                y_pred, m[i] = models.baselines.SVC_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "DNN":
                y_pred, m[i] = models.DNN.DNN_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "lasso_ensemble":
                y_pred, m[i] = models.ensembles.LASSOs_ensemble_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "doench":
                y_pred, m[i] = models.baselines.doench_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "sgrna_from_doench":
                y_pred, m[i] = models.baselines.sgrna_from_doench_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "xu_et_al":
                y_pred, m[i] = models.baselines.xu_et_al_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            else:
                raise Exception("invalid method found: %s" % learn_options["method"])
            if learn_options["training_metric"]=="AUC":
                # fills in truth and predictions
                extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options['ground_truth_label']].values, test, y_pred)
            elif learn_options["training_metric"]=="NDCG":
                extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            elif learn_options["training_metric"] == 'spearmanr':
                extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i], y_all, y_pred, learn_options, test)
            print "\t\tRMSE: ", np.sqrt(((y_pred - y[test])**2).mean())
            print "\t\tSpearman correlation: ", util.spearmanr_nonan(y[test], y_pred)[0]
            print "\t\tfinished fold/gene %i of %i" % (i, len(fold_labels))
    cv_median_metric =[np.median(metrics)]
    gene_pred = [(truth, predictions)]
    print "\t\tmedian %s across gene folds: %.3f" % (learn_options["training_metric"], cv_median_metric[-1])
    t3 = time.time()
    print "\t\tElapsed time for cv is %.2f seconds" % (t3-t2)
    return metrics, gene_pred, fold_labels, m, dimsum, filename, feature_names
| |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import db
import os
import test_data
import time
# TODO: move some of this shared coverage into the db_indexer test
class DBTestBase(object):
def setUp(self):
self.test_data = test_data.TestData()
@property
def test_data_dir(self):
return self.test_data.test_data_dir
def tearDown(self):
self.test_data.close()
def test_dirs(self):
d1 = os.path.join(self.test_data_dir, 'project1')
d2 = os.path.join(self.test_data_dir, 'something')
d3 = os.path.join(self.test_data_dir, 'xxx')
self.assertEquals([], self.db.dirs)
d1_ = self.db.add_dir(d1)
self.assertEquals([d1_], self.db.dirs)
self.assertEquals(d1_.path, d1)
d2_ = self.db.add_dir(d2)
self.assertEquals([d1_, d2_], self.db.dirs)
self.db.delete_dir(d1_)
self.assertEquals([d2_], self.db.dirs)
d3_ = self.db.add_dir(d3)
self.assertEquals([d2_, d3_], self.db.dirs)
self.db.delete_dir(d3_)
self.assertEquals([d2_], self.db.dirs)
def test_nonexistant_dir(self):
self.db.add_dir(self.test_data_dir)
bad_dir = os.path.join(self.test_data_dir, 'xxx')
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
def test_add_dup_dir_raises(self):
d1 = os.path.join(self.test_data_dir, 'project1')
self.assertEquals([], self.db.dirs)
d1_ = self.db.add_dir(d1)
self.assertRaises(db.DBException, lambda: self.db.add_dir(d1))
def test_add_nested_dir_doesnt_dup(self):
self.db.add_dir(self.test_data_dir)
sub_dir = os.path.join(self.test_data_dir, 'project1')
self.db.add_dir(sub_dir)
self.assertFalse(self.db.is_up_to_date)
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
res = self.db.search('MySubSystem.c')
self.assertEquals(1, len(res.filenames))
self.assertEquals(os.path.join(self.test_data_dir, 'project1/MySubSystem.c'), res.filenames[0])
def test_search_finds_new_file(self):
self.db.add_dir(self.test_data_dir)
self.db.sync()
res = self.db.search('MySubSystem_NEW.c')
self.assertEquals(0, len(res.filenames))
time.sleep(1.2) # let st_mtime advance a second
self.test_data.write1('project1/MySubSystem_NEW.c')
self.db.sync()
res = self.db.search('MySubSystem_NEW.c')
self.assertEquals(1, len(res.filenames))
def test_dir_query(self):
self.db.add_dir(self.test_data_dir)
sub_dir = os.path.join(self.test_data_dir, 'project1/')
self.db.add_dir(sub_dir)
self.assertFalse(self.db.is_up_to_date)
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
res = self.db.search('MySubSystem.c')
self.assertTrue(len(res.filenames) >= 1)
self.assertTrue(os.path.join(self.test_data_dir, 'project1/MySubSystem.c') in res.filenames)
def test_search_unique(self):
self.db.add_dir(self.test_data_dir)
self.assertFalse(self.db.is_up_to_date)
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
res = self.db.search('MySubSystem.c')
self.assertEquals(1, len(res.filenames))
self.assertEquals(os.path.join(self.test_data_dir, 'project1/MySubSystem.c'), res.filenames[0])
def test_search_with_dir(self):
self.db.add_dir(self.test_data_dir)
self.assertFalse(self.db.is_up_to_date)
self.assertFalse(self.db.status().is_up_to_date)
self.assertTrue(self.db.status().status != '')
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
res = self.db.search('project1/MySubSystem.c')
self.assertEquals(1, len(res.filenames))
self.assertEquals(os.path.join(self.test_data_dir, 'project1/MySubSystem.c'), res.filenames[0])
def test_partial_search(self):
self.db.add_dir(self.test_data_dir)
self.assertFalse(self.db.is_up_to_date)
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
res = self.db.search('MyClass')
self.assertTrue(len(res.filenames) >= 2)
self.assertTrue(os.path.join(self.test_data_dir, 'project1/MyClass.c') in res.filenames)
self.assertTrue(os.path.join(self.test_data_dir, 'project1/MyClass.h') in res.filenames)
def test_dir_symlinks_dont_dup(self):
pass
def test_ignores(self):
self.db.add_dir(self.test_data_dir)
self.assertFalse(self.db.is_up_to_date)
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
# .git should not be found
res = self.db.search('packed-refs')
self.assertEquals(0, len(res.filenames))
# file inside .svn should not be found
res = self.db.search('svn_should_not_show_up.txt')
self.assertEquals(0, len(res.filenames))
# certain ignored suffixes should not be found
self.assertEquals([], self.db.search('ignored.o').filenames)
self.assertEquals([], self.db.search('ignored.pyc').filenames)
self.assertEquals([], self.db.search('ignored.pyo').filenames)
def test_ignore_path(self):
# test ignore of absolute path
self.db.add_dir(self.test_data_dir)
self.db.ignore(os.path.join(self.test_data_dir, 'something/*'))
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
self.assertEquals([], self.db.search('something_file.txt').filenames)
def test_ignore_inside_symlink(self):
# the case of interest here is where someone has
# /project1_symlink_dir --> /project1
# /project1_symlink is the one requested to be indexed
# and they ask to ignore something inside the symlink
project1_symlink_dir = os.path.join(self.test_data_dir, "project1_symlink/")
self.db.add_dir(project1_symlink_dir)
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
ref_file = os.path.join(self.test_data_dir, "project1/module/project1_module1.txt")
# first make sure it shows up via project1_symlink at the non-symlink location
hits = self.db.search('project1_module1.txt').filenames
self.assertTrue(ref_file in hits)
# now ignore something inside project1_symlink
self.db.ignore(os.path.join(self.test_data_dir, 'project1_symlink/module/*'))
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
hits = self.db.search('project1_module1.txt').filenames
self.assertTrue(ref_file not in hits)
def test_empty_search(self):
self.db.add_dir(self.test_data_dir)
self.db.sync()
self.assertEquals([], self.db.search('').filenames)
def test_ignore_ctl(self):
self.db.add_dir(self.test_data_dir)
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
res = self.db.search('svn_should_not_show_up.txt')
self.assertEquals(0, len(res.filenames))
orig = list(self.db.ignores)
for i in orig:
self.db.unignore(i)
self.assertFalse(self.db.is_up_to_date)
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
res = self.db.search('svn_should_not_show_up.txt')
self.assertEquals(1, len(res.filenames))
for i in orig:
self.db.ignore(i)
self.assertFalse(self.db.is_up_to_date)
self.db.sync()
self.assertTrue(self.db.has_index and self.db.is_up_to_date)
res = self.db.search('svn_should_not_show_up.txt')
self.assertEquals(0, len(res.filenames))
def test_sync(self):
self.db.add_dir(self.test_data_dir)
self.assertFalse(self.db.is_up_to_date)
self.db.sync()
self.assertTrue(self.db.has_index)
self.assertTrue(self.db.is_up_to_date)
def test_dup_ignore_ctl(self):
self.db.add_dir(self.test_data_dir)
seq = set(self.db.ignores)
seq.add("foo")
self.db.ignore("foo")
self.db.ignore("foo")
self.assertEquals(seq, set(self.db.ignores))
self.db.unignore("foo")
self.assertRaises(Exception, lambda: self.db.unignore("foo"))
self.assertFalse(self.db.is_up_to_date)
def test_search_max_hits(self):
    """max_hits must cap the number of returned filenames."""
    self.db.add_dir(self.test_data_dir)
    self.db.sync()
    res = self.db.search("a")
    # Precondition: the unrestricted query yields more than the cap.
    self.assertTrue(len(res.filenames) > 2)
    # PEP 8 local name; assertEqual replaces the deprecated assertEquals.
    res_limited = self.db.search("a", max_hits=2)
    self.assertEqual(2, len(res_limited.filenames))
def test_exact_search(self):
    """exact_match=True must match whole basenames or full paths only."""
    # Dropped the unused 'x =' binding of add_dir()'s return value.
    self.db.add_dir(self.test_data_dir)
    self.db.sync()
    r = self.db.search("nonexistent", exact_match=True)
    self.assertEqual(r.filenames, [])
    r = self.db.search("MyClass.c", exact_match=True)
    self.assertEqual(r.filenames, [self.test_data.path_to("project1/MyClass.c")])
    exact_readme1 = self.test_data.path_to("something/README")
    exact_readme2 = self.test_data.path_to("svnproj/README")
    # A basename shared by two files returns both.
    r = self.db.search("README", exact_match=True)
    self.assertEqual(set(r.filenames), {exact_readme1, exact_readme2})
    # A full path disambiguates to exactly one file.
    r = self.db.search(exact_readme1, exact_match=True)
    self.assertEqual(r.filenames, [exact_readme1])
    r = self.db.search(exact_readme2, exact_match=True)
    self.assertEqual(r.filenames, [exact_readme2])
| |
import os
import sys
import time
import unittest

try:
    from tests_pydevd_python import debugger_unittest
except ImportError:
    # Allow running this file directly: put the repository root on sys.path
    # and retry the package import. A bare 'except:' here would also swallow
    # KeyboardInterrupt/SystemExit, so catch ImportError explicitly.
    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
    from tests_pydevd_python import debugger_unittest
class WriterThreadStepAndResume(debugger_unittest.AbstractWriterThread):
    # Scenario: with frame-eval mode active, stepping temporarily falls back
    # to the tracing debugger and a plain resume re-enables frame evaluation.
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')

    def run(self):
        """Drive the debugger: hit a breakpoint, step over, then resume."""
        self.start_socket()
        self.write_add_breakpoint(10, 'Method2')
        self.write_add_breakpoint(2, 'Method1')
        self.write_make_initial_run()
        # '111'/'108' are the suspend command ids expected from the debugger
        # -- presumably CMD_THREAD_SUSPEND / step-over; confirm against
        # debugger_unittest's protocol constants.
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 10, 'Expected return to be in line 10, was: %s' % line
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_step_over(thread_id)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('108', True)
        assert line == 11, 'Expected return to be in line 11, was: %s' % line
        # we use tracing debugger while stepping
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 2, 'Expected return to be in line 2, was: %s' % line
        # we enable frame evaluation debugger after "Resume" command
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadStepReturn(debugger_unittest.AbstractWriterThread):
    # Scenario: step-return and step-into both fall back to the tracing
    # debugger even when frame evaluation is active.
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case56.py')

    def run(self):
        """Hit a breakpoint under frame-eval, then step return / step in."""
        self.start_socket()
        self.write_add_breakpoint(2, 'Call2')
        self.write_make_initial_run()
        thread_id, frame_id, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type()
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_get_frame(thread_id, frame_id)
        self.write_step_return(thread_id)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('109', True)
        assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line
        # Step return uses temporary breakpoint, so we use tracing debugger
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_step_in(thread_id)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('107', True)
        # goes to line 4 in jython (function declaration line)
        assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line
        # we use tracing debugger for stepping
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadAddLineBreakWhileRun(debugger_unittest.AbstractWriterThread):
    # Scenario: a line breakpoint added after the program has started keeps
    # the debugger in tracing mode until the breakpoint is removed.
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case3.py')

    def run(self):
        """Add and later remove a breakpoint while the debuggee is running."""
        self.start_socket()
        self.write_make_initial_run()
        # Give the debuggee time to start before adding the breakpoint.
        time.sleep(.5)
        breakpoint_id = self.write_add_breakpoint(4, '')
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 4, 'Expected return to be in line 4, was: %s' % line
        # we use tracing debugger if breakpoint was added while running
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_get_frame(thread_id, frame_id)
        self.write_run_thread(thread_id)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 4, 'Expected return to be in line 4, was: %s' % line
        # we still use tracing debugger
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_get_frame(thread_id, frame_id)
        self.write_remove_breakpoint(breakpoint_id)
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadExceptionBreak(debugger_unittest.AbstractWriterThread):
    # Scenario: an exception breakpoint with a suspend-on-raise policy forces
    # the tracing debugger even for ordinary line breakpoints.
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')

    def run(self):
        """Hit a line breakpoint while an exception breakpoint is active."""
        self.start_socket()
        self.write_add_breakpoint(10, 'Method2')
        # Policy flags appear to be (on-raise, on-termination, ignore-libs)
        # -- TODO confirm flag order against debugger_unittest.
        self.write_add_exception_breakpoint_with_policy('IndexError', "1", "0", "0")
        self.write_make_initial_run()
        time.sleep(.5)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 10, 'Expected return to be in line 10, was: %s' % line
        # we use tracing debugger if there are exception breakpoints
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadAddExceptionBreakWhileRunning(debugger_unittest.AbstractWriterThread):
    # Scenario: adding an exception breakpoint at runtime downgrades the
    # debugger from frame-eval to tracing for subsequent hits.
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')

    def run(self):
        """First hit uses frame-eval; after an exception breakpoint is added
        the next hit must use tracing."""
        self.start_socket()
        self.write_add_breakpoint(10, 'Method2')
        self.write_add_breakpoint(2, 'Method1')
        # self.write_add_exception_breakpoint_with_policy('IndexError', "1", "0", "0")
        self.write_make_initial_run()
        time.sleep(.5)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 10, 'Expected return to be in line 10, was: %s' % line
        # No exception breakpoints yet, so frame evaluation is still active.
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_add_exception_breakpoint_with_policy('IndexError', "1", "0", "0")
        self.write_run_thread(thread_id)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 2, 'Expected return to be in line 2, was: %s' % line
        # we use tracing debugger if exception break was added
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadAddTerminationExceptionBreak(debugger_unittest.AbstractWriterThread):
    # Scenario: an exception breakpoint that only fires on termination does
    # not force the tracing debugger.
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')

    def run(self):
        """Frame evaluation stays enabled with an on-termination-only
        exception breakpoint."""
        self.start_socket()
        self.write_add_breakpoint(10, 'Method2')
        self.write_add_exception_breakpoint_with_policy('IndexError', "0", "1", "0")
        self.write_make_initial_run()
        time.sleep(.5)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 10, 'Expected return to be in line 10, was: %s' % line
        # we can use frame evaluation with exception breakpoint with "On termination" suspend policy
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
class TestFrameEval(unittest.TestCase, debugger_unittest.DebuggerRunner):
    """Run each WriterThread frame-evaluation scenario as a unittest case."""

    def get_command_line(self):
        """Command used to launch the debugged process (unbuffered Python)."""
        interpreter = sys.executable
        return [interpreter, '-u']

    def test_step_and_resume(self):
        self.check_case(WriterThreadStepAndResume)

    def test_step_return(self):
        self.check_case(WriterThreadStepReturn)

    def test_add_break_while_running(self):
        self.check_case(WriterThreadAddLineBreakWhileRun)

    def test_exc_break(self):
        self.check_case(WriterThreadExceptionBreak)

    def test_add_exc_break_while_running(self):
        self.check_case(WriterThreadAddExceptionBreakWhileRunning)

    def test_add_termination_exc_break(self):
        self.check_case(WriterThreadAddTerminationExceptionBreak)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| |
#!/usr/bin/env python
# coding=utf-8
# Contributor:
#      Phus Lu <phus.lu@gmail.com>
"""GoAgent server side: a fetch proxy deployable on GAE, SAE or standalone."""

__version__ = '2.1.11'
# Shared client secret; empty string disables the password check.
__password__ = ''
__hostsdeny__ = ()  # __hostsdeny__ = ('.youtube.com', '.youku.com')

import sys
import os
import re
import time
import struct
import zlib
import binascii
import logging
import httplib
import urlparse
import base64
import cStringIO
import hashlib
import hmac
import errno
# Optional platform SDKs -- each is present only on the matching runtime.
try:
    from google.appengine.api import urlfetch
    from google.appengine.runtime import apiproxy_errors
except ImportError:
    urlfetch = None
try:
    import sae
except ImportError:
    sae = None
# Raw sockets are unavailable inside the GAE sandbox; the bare except looks
# deliberate -- TODO confirm whether ImportError alone would suffice here.
try:
    import socket, select, ssl, thread
except:
    socket = None

# Retry count and size limits for the urlfetch-based GAE path.
FetchMax = 2
FetchMaxSize = 1024*1024*4
DeflateMaxSize = 1024*1024*4
Deadline = 60
def error_html(errno, error, description=''):
    """Render a Google-style HTML error page.

    :param errno: status code shown in the page title (string).
    :param error: short error name shown in the title and heading.
    :param description: optional longer explanation placed in the body.
    :return: the HTML page with every ``{{placeholder}}`` substituted.
    """
    ERROR_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>{{errno}} {{error}}</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Error</b></td></tr>
<tr><td>&nbsp;</td></tr></table>
<blockquote>
<H1>{{error}}</H1>
{{description}}
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
    # Substitute each placeholder in turn; placeholders are distinct, so the
    # replacement order does not matter.
    html = ERROR_TEMPLATE
    for keyword, value in (('errno', errno), ('error', error), ('description', description)):
        html = html.replace('{{%s}}' % keyword, value)
    return html
def socket_forward(local, remote, timeout=60, tick=2, bufsize=8192, maxping=None, maxpong=None, idlecall=None, bitmask=None):
    """Pump bytes between *local* and *remote* until timeout or EOF.

    :param timeout: overall idle budget in seconds, decremented by *tick*
        every select round.
    :param maxping: idle budget restored after local->remote traffic
        (falls back to *timeout* when falsy); *maxpong* likewise for the
        remote->local direction.
    :param idlecall: optional callable invoked once from an idle round (and
        once more on exit if it never ran); cleared after each invocation.
    :param bitmask: optional single-byte XOR obfuscation applied to all
        forwarded data (same mask in both directions).
    """
    timecount = timeout
    try:
        while 1:
            timecount -= tick
            if timecount <= 0:
                break
            (ins, _, errors) = select.select([local, remote], [], [local, remote], tick)
            if errors:
                break
            if ins:
                for sock in ins:
                    data = sock.recv(bufsize)
                    if bitmask:
                        # XOR de/obfuscation: applying the mask again undoes it.
                        data = ''.join(chr(ord(x)^bitmask) for x in data)
                    if data:
                        if sock is local:
                            remote.sendall(data)
                            timecount = maxping or timeout
                        else:
                            local.sendall(data)
                            timecount = maxpong or timeout
                    else:
                        # recv() returned '' -> peer closed; stop forwarding.
                        return
            else:
                if idlecall:
                    try:
                        idlecall()
                    except Exception:
                        logging.exception('socket_forward idlecall fail')
                    finally:
                        # Clear so idlecall runs at most once overall.
                        idlecall = None
    except Exception:
        logging.exception('socket_forward error')
        raise
    finally:
        if idlecall:
            idlecall()
def socks5_handler(sock, address, hls={'hmac':{}}):
    """Handle one SOCKS5-over-HTTP-Upgrade connection.

    The client sends an HTTP Upgrade request whose path embeds a 32-hex
    HMAC digest; the digest both authenticates the client and selects the
    single-byte XOR bitmask used to obfuscate the subsequent SOCKS5
    exchange (RFC 1928 framing).

    NOTE: the mutable default *hls* is deliberate -- it memoizes the
    digest->bitmask table across calls.
    """
    if not hls['hmac']:
        # Precompute hmac(password, byte) for every possible mask byte once.
        hls['hmac'] = dict((hmac.new(__password__, chr(x)).hexdigest(),x) for x in xrange(256))
    bufsize = 8192
    rfile = sock.makefile('rb', bufsize)
    wfile = sock.makefile('wb', 0)
    remote_addr, remote_port = address
    MessageClass = dict
    try:
        # --- HTTP Upgrade handshake ---
        line = rfile.readline(bufsize)
        if not line:
            raise socket.error('empty line')
        method, path, version = line.rstrip().split(' ', 2)
        headers = MessageClass()
        while 1:
            line = rfile.readline(bufsize)
            if not line or line == '\r\n':
                break
            keyword, _, value = line.partition(':')
            keyword = keyword.title()
            value = value.strip()
            headers[keyword] = value
        logging.info('%s:%s "%s %s %s" - -', remote_addr, remote_port, method, path, version)
        if headers.get('Connection', '').lower() != 'upgrade':
            logging.error('%s:%s Connection(%s) != "upgrade"', remote_addr, remote_port, headers.get('Connection'))
            return
        m = re.search('([0-9a-f]{32})', path)
        if not m:
            logging.error('%s:%s Path(%s) not valid', remote_addr, remote_port, path)
            return
        need_digest = m.group(1)
        # Unknown digest -> wrong password or forged request; drop silently.
        bitmask = hls['hmac'].get(need_digest)
        if bitmask is None:
            logging.error('%s:%s Digest(%s) not match', remote_addr, remote_port, need_digest)
            return
        else:
            logging.info('%s:%s Digest(%s) return bitmask=%r', remote_addr, remote_port, need_digest, bitmask)
        wfile.write('HTTP/1.1 101 Switching Protocols\r\nConnection: Upgrade\r\n\r\n')
        wfile.flush()
        # All further traffic is XOR'ed with the negotiated bitmask.
        rfile_read = lambda n:''.join(chr(ord(x)^bitmask) for x in rfile.read(n))
        wfile_write = lambda s:wfile.write(''.join(chr(ord(x)^bitmask) for x in s))
        # 1. Greeting: read VER+NMETHODS, skip the method list, reply
        #    "no authentication required" (\x05\x00).
        rfile_read(ord(rfile_read(2)[-1]))
        wfile_write(b'\x05\x00');
        # 2. Request
        data = rfile_read(4)
        mode = ord(data[1])
        addrtype = ord(data[3])
        if addrtype == 1: # IPv4
            addr = socket.inet_ntoa(rfile_read(4))
        elif addrtype == 3: # Domain name
            addr = rfile_read(ord(rfile_read(1)[0]))
        # NOTE(review): addrtype 4 (IPv6) is not handled -- 'addr' would be
        # unbound below if a client ever sent it.
        port = struct.unpack('>H',rfile_read(2))
        reply = b'\x05\x00\x00\x01'
        try:
            logging.info('%s:%s socks5 mode=%r', remote_addr, remote_port, mode)
            if mode == 1: # 1. TCP Connect
                remote = socket.create_connection((addr, port[0]))
                logging.info('%s:%s TCP Connect to %s:%s', remote_addr, remote_port, addr, port[0])
                local = remote.getsockname()
                reply += socket.inet_aton(local[0]) + struct.pack(">H", local[1])
            else:
                reply = b'\x05\x07\x00\x01' # Command not supported
        except socket.error:
            # Connection refused
            reply = '\x05\x05\x00\x01\x00\x00\x00\x00\x00\x00'
        wfile_write(reply)
        # 3. Transfering
        if reply[1] == '\x00': # Success
            if mode == 1: # 1. Tcp connect
                socket_forward(sock, remote, bitmask=bitmask)
    except socket.error as e:
        # 10053 is WSAECONNABORTED on Windows; routine disconnects are ignored.
        if e[0] not in (10053, errno.EPIPE, 'empty line'):
            raise
    finally:
        rfile.close()
        wfile.close()
        sock.close()
def paas_application(environ, start_response):
    """WSGI app for generic PaaS hosts: decode the packed GoAgent request
    and relay it upstream with httplib, streaming the body back.

    Request wire format: a 2-byte big-endian length followed by a
    raw-deflate blob of 'Header:value' lines, including the pseudo headers
    G-Method and G-Url plus other G-* options.
    """
    if environ['REQUEST_METHOD'] == 'GET':
        # A plain browser GET gets redirected away. This generator-style
        # WSGI app (Python 2 era) terminates via StopIteration.
        start_response('302 Found', [('Location', 'https://www.google.com')])
        raise StopIteration
    # inflate = lambda x:zlib.decompress(x, -15)
    wsgi_input = environ['wsgi.input']
    data = wsgi_input.read(2)
    metadata_length, = struct.unpack('!h', data)
    metadata = wsgi_input.read(metadata_length)
    metadata = zlib.decompress(metadata, -15)  # raw deflate, no zlib header
    headers = dict(x.split(':', 1) for x in metadata.splitlines() if x)
    method = headers.pop('G-Method')
    url = headers.pop('G-Url')
    # Collect the remaining G-* options (e.g. password) into kwargs.
    kwargs = {}
    any(kwargs.__setitem__(x[2:].lower(), headers.pop(x)) for x in headers.keys() if x.startswith('G-'))
    headers['Connection'] = 'close'
    payload = environ['wsgi.input'].read() if 'Content-Length' in headers else None
    if 'Content-Encoding' in headers:
        if headers['Content-Encoding'] == 'deflate':
            # The client may deflate the body; expand it before relaying.
            payload = zlib.decompress(payload, -15)
            headers['Content-Length'] = str(len(payload))
            del headers['Content-Encoding']
    if __password__ and __password__ != kwargs.get('password'):
        # Wrong password: proxy a request to a random (unresolvable) host --
        # presumably to disguise the rejection as a transient failure; verify.
        random_host = 'g%d%s' % (int(time.time()*100), environ['HTTP_HOST'])
        conn = httplib.HTTPConnection(random_host, timeout=3)
        conn.request('GET', '/')
        response = conn.getresponse(True)
        status_line = '%s %s' % (response.status, httplib.responses.get(response.status, 'OK'))
        start_response(status_line, response.getheaders())
        yield response.read()
        raise StopIteration
    if __hostsdeny__ and urlparse.urlparse(url).netloc.endswith(__hostsdeny__):
        start_response('403 Forbidden', [('Content-Type', 'text/html')])
        yield error_html('403', 'Hosts Deny', description='url=%r' % url)
        raise StopIteration
    timeout = Deadline
    logging.info('%s "%s %s %s" - -', environ['REMOTE_ADDR'], method, url, 'HTTP/1.1')
    if method != 'CONNECT':
        try:
            scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
            HTTPConnection = httplib.HTTPSConnection if scheme == 'https' else httplib.HTTPConnection
            if params:
                path += ';' + params
            if query:
                path += '?' + query
            conn = HTTPConnection(netloc, timeout=timeout)
            conn.request(method, path, body=payload, headers=headers)
            response = conn.getresponse()
            # The real upstream status travels in X-Status; the WSGI status
            # is always 200 so intermediaries do not interfere.
            headers = [('X-Status', str(response.status))]
            headers += [(k, v) for k, v in response.msg.items() if k != 'transfer-encoding']
            start_response('200 OK', headers)
            bufsize = 8192
            # Stream the upstream body through in chunks.
            while 1:
                data = response.read(bufsize)
                if not data:
                    response.close()
                    break
                yield data
        except httplib.HTTPException as e:
            raise
def gae_application(environ, start_response):
    """WSGI app for Google App Engine: decode the packed GoAgent request,
    fetch it via urlfetch with retry/size-limit handling, then stream a
    packed response back.

    Response wire format: struct '!hh' (status, header-blob-length), the
    raw-deflate header blob, then the (possibly re-compressed) body.
    """
    if environ['REQUEST_METHOD'] == 'GET':
        # Plain browser GET: health check (?204) or a deployment banner.
        if '204' in environ['QUERY_STRING']:
            start_response('204 No Content', [])
            yield ''
        else:
            # CURRENT_VERSION_ID's second dotted field encodes the deploy
            # time shifted left by 28 bits; +8h renders it in UTC+8.
            timestamp = long(os.environ['CURRENT_VERSION_ID'].split('.')[1])/pow(2,28)
            ctime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(timestamp+8*3600))
            html = u'GoAgent Python Server %s \u5df2\u7ecf\u5728\u5de5\u4f5c\u4e86\uff0c\u90e8\u7f72\u65f6\u95f4 %s\n' % (__version__, ctime)
            start_response('200 OK', [('Content-Type', 'text/plain; charset=utf-8')])
            yield html.encode('utf8')
        raise StopIteration
    # inflate = lambda x:zlib.decompress(x, -15)
    wsgi_input = environ['wsgi.input']
    data = wsgi_input.read(2)
    metadata_length, = struct.unpack('!h', data)
    metadata = wsgi_input.read(metadata_length)
    metadata = zlib.decompress(metadata, -15)  # raw deflate, no zlib header
    headers = dict(x.split(':', 1) for x in metadata.splitlines() if x)
    method = headers.pop('G-Method')
    url = headers.pop('G-Url')
    # Collect remaining G-* options (password, fetchmax, fetchmaxsize, ...).
    kwargs = {}
    any(kwargs.__setitem__(x[2:].lower(), headers.pop(x)) for x in headers.keys() if x.startswith('G-'))
    #logging.info('%s "%s %s %s" - -', environ['REMOTE_ADDR'], method, url, 'HTTP/1.1')
    #logging.info('request headers=%s', headers)
    if __password__ and __password__ != kwargs.get('password', ''):
        start_response('403 Forbidden', [('Content-Type', 'text/html')])
        yield error_html('403', 'Wrong password', description='GoAgent proxy.ini password is wrong!')
        raise StopIteration
    if __hostsdeny__ and urlparse.urlparse(url).netloc.endswith(__hostsdeny__):
        start_response('403 Forbidden', [('Content-Type', 'text/html')])
        yield error_html('403', 'Hosts Deny', description='url=%r' % url)
        raise StopIteration
    # urlfetch exposes one attribute per HTTP verb (urlfetch.GET, ...).
    fetchmethod = getattr(urlfetch, method, '')
    if not fetchmethod:
        start_response('501 Unsupported', [('Content-Type', 'text/html')])
        yield error_html('501', 'Invalid Method: %r'% method, description='Unsupported Method')
        raise StopIteration
    deadline = Deadline
    headers = dict(headers)
    headers['Connection'] = 'close'
    payload = environ['wsgi.input'].read() if 'Content-Length' in headers else None
    if 'Content-Encoding' in headers:
        if headers['Content-Encoding'] == 'deflate':
            # The client may deflate the body; expand it before fetching.
            payload = zlib.decompress(payload, -15)
            headers['Content-Length'] = str(len(payload))
            del headers['Content-Encoding']
    accept_encoding = headers.get('Accept-Encoding', '')
    errors = []
    # Retry loop: up to 'fetchmax' urlfetch attempts with error-specific
    # backoff and recovery strategies.
    for i in xrange(int(kwargs.get('fetchmax', FetchMax))):
        try:
            response = urlfetch.fetch(url, payload, fetchmethod, headers, allow_truncated=False, follow_redirects=False, deadline=deadline, validate_certificate=False)
            break
        except apiproxy_errors.OverQuotaError as e:
            time.sleep(5)
        except urlfetch.DeadlineExceededError as e:
            errors.append('%r, deadline=%s' % (e, deadline))
            logging.error('DeadlineExceededError(deadline=%s, url=%r)', deadline, url)
            time.sleep(1)
            deadline = Deadline * 2
        except urlfetch.DownloadError as e:
            errors.append('%r, deadline=%s' % (e, deadline))
            logging.error('DownloadError(deadline=%s, url=%r)', deadline, url)
            time.sleep(1)
            deadline = Deadline * 2
        except urlfetch.ResponseTooLargeError as e:
            # Body exceeds urlfetch's limit: retry as a Range request capped
            # at 'fetchmaxsize', continuing from any client-supplied offset.
            response = e.response
            logging.error('ResponseTooLargeError(deadline=%s, url=%r) response(%r)', deadline, url, response)
            m = re.search(r'=\s*(\d+)-', headers.get('Range') or headers.get('range') or '')
            if m is None:
                headers['Range'] = 'bytes=0-%d' % int(kwargs.get('fetchmaxsize', FetchMaxSize))
            else:
                headers.pop('Range', '')
                headers.pop('range', '')
                start = int(m.group(1))
                headers['Range'] = 'bytes=%s-%d' % (start, start+int(kwargs.get('fetchmaxsize', FetchMaxSize)))
            deadline = Deadline * 2
        except Exception as e:
            errors.append(str(e))
            if i==0 and method=='GET':
                # First GET attempt: retry once with a doubled deadline.
                deadline = Deadline * 2
            else:
                # Give up: report every collected error to the client.
                start_response('500 Internal Server Error', [('Content-Type', 'text/html')])
                yield error_html('502', 'Python Urlfetch Error: %r' % method, description='<br />\n'.join(errors) or 'UNKOWN')
                raise StopIteration
    #logging.debug('url=%r response.status_code=%r response.headers=%r response.content[:1024]=%r', url, response.status_code, dict(response.headers), response.content[:1024])
    data = response.content
    # Re-compress small, text-like bodies if the client advertises support.
    if 'content-encoding' not in response.headers and len(response.content) < DeflateMaxSize and response.headers.get('content-type', '').startswith(('text/', 'application/json', 'application/javascript')):
        if 'deflate' in accept_encoding:
            response.headers['Content-Encoding'] = 'deflate'
            data = zlib.compress(data)[2:-4]  # strip zlib header and checksum
        elif 'gzip' in accept_encoding:
            response.headers['Content-Encoding'] = 'gzip'
            compressobj = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
            dataio = cStringIO.StringIO()
            # Hand-built gzip container: fixed header, deflate stream,
            # then CRC32 + uncompressed size (both little-endian).
            dataio.write('\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff')
            dataio.write(compressobj.compress(data))
            dataio.write(compressobj.flush())
            dataio.write(struct.pack('<LL', zlib.crc32(data)&0xFFFFFFFFL, len(data)&0xFFFFFFFFL))
            data = dataio.getvalue()
        response.headers['Content-Length'] = str(len(data))
    # Pack the upstream headers (minus GAE's x-google-*) as raw deflate.
    response_headers = zlib.compress('\n'.join('%s:%s'%(k.title(),v) for k, v in response.headers.items() if not k.startswith('x-google-')))[2:-4]
    start_response('200 OK', [('Content-Type', 'image/gif')])
    yield struct.pack('!hh', int(response.status_code), len(response_headers))+response_headers
    yield data
# Pick the WSGI app for the detected runtime (GAE when urlfetch imported,
# generic PaaS otherwise) and wrap it for SAE when its SDK is importable.
app = gae_application if urlfetch else paas_application
application = app if sae is None else sae.create_wsgi_app(app)
if __name__ == '__main__':
    # Standalone mode (outside GAE/SAE): serve with gevent.
    logging.basicConfig(level=logging.INFO, format='%(levelname)s - - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')
    import gevent, gevent.server, gevent.wsgi, gevent.monkey, getopt
    gevent.monkey.patch_all(dns=gevent.version_info[0]>=1)
    # CLI: -l listen address, -p port, -a app mode ('socks5' or WSGI).
    options = dict(getopt.getopt(sys.argv[1:], 'l:p:a:')[0])
    host = options.get('-l', '0.0.0.0')
    port = options.get('-p', '80')
    app = options.get('-a', 'socks5')
    if app == 'socks5':
        server = gevent.server.StreamServer((host, int(port)), socks5_handler)
    else:
        server = gevent.wsgi.WSGIServer((host, int(port)), paas_application)
    logging.info('serving %s at http://%s:%s/', app.upper(), server.address[0], server.address[1])
    server.serve_forever()
| |
# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import hashlib
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.compat import json
import boto
class SNSConnection(AWSQueryConnection):
"""
Amazon Simple Notification Service
Amazon Simple Notification Service (Amazon SNS) is a web service
that enables you to build distributed web-enabled applications.
Applications can use Amazon SNS to easily push real-time
notification messages to interested subscribers over multiple
delivery protocols. For more information about this product see
`http://aws.amazon.com/sns`_. For detailed information about
Amazon SNS features and their associated API calls, see the
`Amazon SNS Developer Guide`_.
We also provide SDKs that enable you to access Amazon SNS from
your preferred programming language. The SDKs contain
functionality that automatically takes care of tasks such as:
cryptographically signing your service requests, retrying
requests, and handling error responses. For a list of available
SDKs, go to `Tools for Amazon Web Services`_.
"""
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com'
APIVersion = '2010-03-31'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
             is_secure=True, port=None, proxy=None, proxy_port=None,
             proxy_user=None, proxy_pass=None, debug=0,
             https_connection_factory=None, region=None, path='/',
             security_token=None, validate_certs=True):
    """Create an SNS connection, defaulting to us-east-1 when no
    region is supplied.

    :type region: :class:`boto.regioninfo.RegionInfo`
    :param region: Endpoint to connect to; when falsy, the class-level
        DefaultRegionName/DefaultRegionEndpoint pair is used.
    """
    if not region:
        region = RegionInfo(self, self.DefaultRegionName,
                            self.DefaultRegionEndpoint,
                            connection_cls=SNSConnection)
    self.region = region
    # NOTE: these positional arguments must match the order of
    # AWSQueryConnection.__init__'s signature exactly.
    AWSQueryConnection.__init__(self, aws_access_key_id,
                                aws_secret_access_key,
                                is_secure, port, proxy, proxy_port,
                                proxy_user, proxy_pass,
                                self.region.endpoint, debug,
                                https_connection_factory, path,
                                security_token=security_token,
                                validate_certs=validate_certs)
def _build_dict_as_list_params(self, params, dictionary, name):
    """
    Serialize a parameter 'name' whose value is a 'dictionary' into a
    list of parameters.

    See: http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html
    For example::

        dictionary = {'PlatformPrincipal': 'foo', 'PlatformCredential': 'bar'}
        name = 'Attributes'

    would result in params dict being populated with:

        Attributes.entry.1.key = PlatformPrincipal
        Attributes.entry.1.value = foo
        Attributes.entry.2.key = PlatformCredential
        Attributes.entry.2.value = bar

    :param params: the resulting parameters will be added to this dict
    :param dictionary: dict - value of the serialized parameter
    :param name: name of the serialized parameter
    """
    # Sort by key so the generated 1-based indexes are deterministic;
    # enumerate replaces the original manual zip(items, range(...)) pairing.
    items = sorted(dictionary.items(), key=lambda kv: kv[0])
    for index, (key, value) in enumerate(items, start=1):
        prefix = '%s.entry.%s' % (name, index)
        params['%s.key' % prefix] = key
        params['%s.value' % prefix] = value
def _required_auth_capability(self):
    # SNS requests are signed with AWS Signature Version 4.
    return ['hmac-v4']
def get_all_topics(self, next_token=None):
    """List the caller's topics (one page per call).

    :type next_token: string
    :param next_token: Token returned by the previous call to
        this method; omit for the first page.
    """
    params = {'NextToken': next_token} if next_token else {}
    return self._make_request('ListTopics', params)
def get_topic_attributes(self, topic):
    """Fetch all attributes of a topic.

    :type topic: string
    :param topic: The ARN of the topic.
    """
    return self._make_request('GetTopicAttributes', {'TopicArn': topic})
def set_topic_attributes(self, topic, attr_name, attr_value):
    """Set one mutable attribute of a topic.

    :type topic: string
    :param topic: The ARN of the topic.

    :type attr_name: string
    :param attr_name: The name of the attribute you want to set. Only a
        subset of the topic's attributes are mutable.
        Valid values: Policy | DisplayName

    :type attr_value: string
    :param attr_value: The new value for the attribute.
    """
    return self._make_request('SetTopicAttributes', {
        'TopicArn': topic,
        'AttributeName': attr_name,
        'AttributeValue': attr_value,
    })
def add_permission(self, topic, label, account_ids, actions):
    """Grant the given AWS accounts access to the given actions on a
    topic by adding a statement to its access control policy.

    :type topic: string
    :param topic: The ARN of the topic.

    :type label: string
    :param label: A unique identifier for the new policy statement.

    :type account_ids: list of strings
    :param account_ids: AWS account ids of the users being granted
        access to the specified actions.

    :type actions: list of strings
    :param actions: The actions to allow for the specified principal(s).
    """
    params = {'Label': label, 'TopicArn': topic}
    # Expand both lists into indexed .member.N query parameters.
    self.build_list_params(params, account_ids, 'AWSAccountId.member')
    self.build_list_params(params, actions, 'ActionName.member')
    return self._make_request('AddPermission', params)
def remove_permission(self, topic, label):
    """Remove the statement identified by *label* from a topic's access
    control policy.

    :type topic: string
    :param topic: The ARN of the topic.

    :type label: string
    :param label: A unique identifier for the policy statement
        to be removed.
    """
    return self._make_request('RemovePermission',
                              {'TopicArn': topic, 'Label': label})
def create_topic(self, topic):
    """Create a new topic.

    :type topic: string
    :param topic: The name of the new topic.
    """
    return self._make_request('CreateTopic', {'Name': topic})
def delete_topic(self, topic):
    """
    Delete an existing topic

    :type topic: string
    :param topic: The ARN of the topic
    """
    params = {'TopicArn': topic}
    # Unlike the other actions in this class, DeleteTopic is issued as a
    # GET against '/' rather than relying on _make_request's defaults.
    return self._make_request('DeleteTopic', params, '/', 'GET')
def publish(self, topic=None, message=None, subject=None, target_arn=None,
            message_structure=None):
    """Publish a message to a topic or endpoint.

    :type topic: string
    :param topic: The ARN of the topic to publish to.

    :type message: string
    :param message: The message you want to send. Messages must be UTF-8
        encoded strings and be at most 4KB in size.

    :type message_structure: string
    :param message_structure: Optional parameter. If left as ``None``,
        plain text will be sent. If set to ``json``, your message should
        be a JSON string matching the structure described at
        http://docs.aws.amazon.com/sns/latest/dg/PublishTopic.html#sns-message-formatting-by-protocol

    :type subject: string
    :param subject: Optional parameter used as the "Subject" line of
        email notifications.

    :type target_arn: string
    :param target_arn: Optional parameter for either TopicArn or
        EndpointArn, but not both.

    :raises TypeError: if *message* is None.
    """
    # Backwards compatibility: message used to be a required positional
    # argument, so enforce its presence explicitly.
    if message is None:
        raise TypeError("'message' is a required parameter")
    params = {'Message': message}
    optional = (('Subject', subject),
                ('TopicArn', topic),
                ('TargetArn', target_arn),
                ('MessageStructure', message_structure))
    for param_name, param_value in optional:
        if param_value is not None:
            params[param_name] = param_value
    return self._make_request('Publish', params)
def subscribe(self, topic, protocol, endpoint):
    """Subscribe an endpoint to a topic.

    :type topic: string
    :param topic: The ARN of the topic.

    :type protocol: string
    :param protocol: The protocol used to communicate with the
        subscriber. Current choices are:
        email|email-json|http|https|sqs

    :type endpoint: string
    :param endpoint: The location of the endpoint for the subscriber:

        * For email / email-json, a valid email address
        * For http, a URL beginning with http
        * For https, a URL beginning with https
        * For sqs, the ARN of an SQS queue
    """
    return self._make_request('Subscribe', {
        'TopicArn': topic,
        'Protocol': protocol,
        'Endpoint': endpoint,
    })
def subscribe_sqs_queue(self, topic, queue):
    """
    Subscribe an SQS queue to a topic.

    This is a convenience method that handles most of the complexity
    involved in using an SQS queue as an endpoint for an SNS topic.
    The following operations are performed:

    * The queue's ARN is subscribed to the topic.
    * A JSON policy statement granting the SNS topic permission to send
      messages to the SQS queue is merged into the queue's existing
      policy (creating the policy if none exists) via the queue's
      set_attribute method.

    :type topic: string
    :param topic: The ARN of the topic.

    :type queue: A boto Queue object
    :param queue: The queue you wish to subscribe to the SNS Topic.
    """
    # (Removed the original unused 't = queue.id.split('/')' binding.)
    q_arn = queue.arn
    # Deterministic statement id derived from the (topic, queue) pair, so
    # re-subscribing the same pair never duplicates the policy statement.
    sid = hashlib.md5(topic + q_arn).hexdigest()
    resp = self.subscribe(topic, 'sqs', q_arn)
    attr = queue.get_attributes('Policy')
    if 'Policy' in attr:
        policy = json.loads(attr['Policy'])
    else:
        policy = {}
    policy.setdefault('Version', '2008-10-17')
    policy.setdefault('Statement', [])
    # Only append a new Statement when one with this Sid is not present.
    sid_exists = any(s['Sid'] == sid for s in policy['Statement'])
    if not sid_exists:
        statement = {'Action': 'SQS:SendMessage',
                     'Effect': 'Allow',
                     'Principal': {'AWS': '*'},
                     'Resource': q_arn,
                     'Sid': sid,
                     'Condition': {'StringLike': {'aws:SourceArn': topic}}}
        policy['Statement'].append(statement)
    queue.set_attribute('Policy', json.dumps(policy))
    return resp
def confirm_subscription(self, topic, token,
authenticate_on_unsubscribe=False):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type token: string
:param token: Short-lived token sent to and endpoint during
the Subscribe operation.
:type authenticate_on_unsubscribe: bool
:param authenticate_on_unsubscribe: Optional parameter indicating
that you wish to disable
unauthenticated unsubscription
of the subscription.
"""
params = {'TopicArn': topic, 'Token': token}
if authenticate_on_unsubscribe:
params['AuthenticateOnUnsubscribe'] = 'true'
return self._make_request('ConfirmSubscription', params)
def unsubscribe(self, subscription):
"""
Allows endpoint owner to delete subscription.
Confirmation message will be delivered.
:type subscription: string
:param subscription: The ARN of the subscription to be deleted.
"""
params = {'SubscriptionArn': subscription}
return self._make_request('Unsubscribe', params)
def get_all_subscriptions(self, next_token=None):
"""
Get list of all subscriptions.
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {}
if next_token:
params['NextToken'] = next_token
return self._make_request('ListSubscriptions', params)
def get_all_subscriptions_by_topic(self, topic, next_token=None):
"""
Get list of all subscriptions to a specific topic.
:type topic: string
:param topic: The ARN of the topic for which you wish to
find subscriptions.
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {'TopicArn': topic}
if next_token:
params['NextToken'] = next_token
return self._make_request('ListSubscriptionsByTopic', params)
def create_platform_application(self, name=None, platform=None,
attributes=None):
"""
The `CreatePlatformApplication` action creates a platform
application object for one of the supported push notification
services, such as APNS and GCM, to which devices and mobile
apps may register. You must specify PlatformPrincipal and
PlatformCredential attributes when using the
`CreatePlatformApplication` action. The PlatformPrincipal is
received from the notification service. For APNS/APNS_SANDBOX,
PlatformPrincipal is "SSL certificate". For GCM,
PlatformPrincipal is not applicable. For ADM,
PlatformPrincipal is "client id". The PlatformCredential is
also received from the notification service. For
APNS/APNS_SANDBOX, PlatformCredential is "private key". For
GCM, PlatformCredential is "API key". For ADM,
PlatformCredential is "client secret". The
PlatformApplicationArn that is returned when using
`CreatePlatformApplication` is then used as an attribute for
the `CreatePlatformEndpoint` action. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type name: string
:param name: Application names must be made up of only uppercase and
lowercase ASCII letters, numbers, underscores, hyphens, and
periods, and must be between 1 and 256 characters long.
:type platform: string
:param platform: The following platforms are supported: ADM (Amazon
Device Messaging), APNS (Apple Push Notification Service),
APNS_SANDBOX, and GCM (Google Cloud Messaging).
:type attributes: map
:param attributes: For a list of attributes, see
`SetPlatformApplicationAttributes`_
"""
params = {}
if name is not None:
params['Name'] = name
if platform is not None:
params['Platform'] = platform
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='CreatePlatformApplication',
params=params)
def set_platform_application_attributes(self,
platform_application_arn=None,
attributes=None):
"""
The `SetPlatformApplicationAttributes` action sets the
attributes of the platform application object for the
supported push notification services, such as APNS and GCM.
For more information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn for
SetPlatformApplicationAttributes action.
:type attributes: map
:param attributes:
A map of the platform application attributes. Attributes in this map
include the following:
+ `PlatformCredential` -- The credential received from the notification
service. For APNS/APNS_SANDBOX, PlatformCredential is "private
key". For GCM, PlatformCredential is "API key". For ADM,
PlatformCredential is "client secret".
+ `PlatformPrincipal` -- The principal received from the notification
service. For APNS/APNS_SANDBOX, PlatformPrincipal is "SSL
certificate". For GCM, PlatformPrincipal is not applicable. For
ADM, PlatformPrincipal is "client id".
+ `EventEndpointCreated` -- Topic ARN to which EndpointCreated event
notifications should be sent.
+ `EventEndpointDeleted` -- Topic ARN to which EndpointDeleted event
notifications should be sent.
+ `EventEndpointUpdated` -- Topic ARN to which EndpointUpdate event
notifications should be sent.
+ `EventDeliveryFailure` -- Topic ARN to which DeliveryFailure event
notifications should be sent upon Direct Publish delivery failure
(permanent) to one of the application's endpoints.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='SetPlatformApplicationAttributes',
params=params)
def get_platform_application_attributes(self,
platform_application_arn=None):
"""
The `GetPlatformApplicationAttributes` action retrieves the
attributes of the platform application object for the
supported push notification services, such as APNS and GCM.
For more information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn for
GetPlatformApplicationAttributesInput.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
return self._make_request(action='GetPlatformApplicationAttributes',
params=params)
def list_platform_applications(self, next_token=None):
"""
The `ListPlatformApplications` action lists the platform
application objects for the supported push notification
services, such as APNS and GCM. The results for
`ListPlatformApplications` are paginated and return a limited
list of applications, up to 100. If additional records are
available after the first page results, then a NextToken
string will be returned. To receive the next page, you call
`ListPlatformApplications` using the NextToken string received
from the previous call. When there are no more records to
return, NextToken will be null. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type next_token: string
:param next_token: NextToken string is used when calling
ListPlatformApplications action to retrieve additional records that
are available after the first page results.
"""
params = {}
if next_token is not None:
params['NextToken'] = next_token
return self._make_request(action='ListPlatformApplications',
params=params)
def list_endpoints_by_platform_application(self,
platform_application_arn=None,
next_token=None):
"""
The `ListEndpointsByPlatformApplication` action lists the
endpoints and endpoint attributes for devices in a supported
push notification service, such as GCM and APNS. The results
for `ListEndpointsByPlatformApplication` are paginated and
return a limited list of endpoints, up to 100. If additional
records are available after the first page results, then a
NextToken string will be returned. To receive the next page,
you call `ListEndpointsByPlatformApplication` again using the
NextToken string received from the previous call. When there
are no more records to return, NextToken will be null. For
more information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn for
ListEndpointsByPlatformApplicationInput action.
:type next_token: string
:param next_token: NextToken string is used when calling
ListEndpointsByPlatformApplication action to retrieve additional
records that are available after the first page results.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
if next_token is not None:
params['NextToken'] = next_token
return self._make_request(action='ListEndpointsByPlatformApplication',
params=params)
def delete_platform_application(self, platform_application_arn=None):
"""
The `DeletePlatformApplication` action deletes a platform
application object for one of the supported push notification
services, such as APNS and GCM. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn of platform
application object to delete.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
return self._make_request(action='DeletePlatformApplication',
params=params)
def create_platform_endpoint(self, platform_application_arn=None,
token=None, custom_user_data=None,
attributes=None):
"""
The `CreatePlatformEndpoint` creates an endpoint for a device
and mobile app on one of the supported push notification
services, such as GCM and APNS. `CreatePlatformEndpoint`
requires the PlatformApplicationArn that is returned from
`CreatePlatformApplication`. The EndpointArn that is returned
when using `CreatePlatformEndpoint` can then be used by the
`Publish` action to send a message to a mobile app or by the
`Subscribe` action for subscription to a topic. For more
information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn returned from
CreatePlatformApplication is used to create a an endpoint.
:type token: string
:param token: Unique identifier created by the notification service for
an app on a device. The specific name for Token will vary,
depending on which notification service is being used. For example,
when using APNS as the notification service, you need the device
token. Alternatively, when using GCM or ADM, the device token
equivalent is called the registration ID.
:type custom_user_data: string
:param custom_user_data: Arbitrary user data to associate with the
endpoint. SNS does not use this data. The data must be in UTF-8
format and less than 2KB.
:type attributes: map
:param attributes: For a list of attributes, see
`SetEndpointAttributes`_.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
if token is not None:
params['Token'] = token
if custom_user_data is not None:
params['CustomUserData'] = custom_user_data
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='CreatePlatformEndpoint',
params=params)
def delete_endpoint(self, endpoint_arn=None):
"""
The `DeleteEndpoint` action, which is idempotent, deletes the
endpoint from SNS. For more information, see `Using Amazon SNS
Mobile Push Notifications`_.
:type endpoint_arn: string
:param endpoint_arn: EndpointArn of endpoint to delete.
"""
params = {}
if endpoint_arn is not None:
params['EndpointArn'] = endpoint_arn
return self._make_request(action='DeleteEndpoint', params=params)
def set_endpoint_attributes(self, endpoint_arn=None, attributes=None):
"""
The `SetEndpointAttributes` action sets the attributes for an
endpoint for a device on one of the supported push
notification services, such as GCM and APNS. For more
information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type endpoint_arn: string
:param endpoint_arn: EndpointArn used for SetEndpointAttributes action.
:type attributes: map
:param attributes:
A map of the endpoint attributes. Attributes in this map include the
following:
+ `CustomUserData` -- arbitrary user data to associate with the
endpoint. SNS does not use this data. The data must be in UTF-8
format and less than 2KB.
+ `Enabled` -- flag that enables/disables delivery to the endpoint.
Message Processor will set this to false when a notification
service indicates to SNS that the endpoint is invalid. Users can
set it back to true, typically after updating Token.
+ `Token` -- device token, also referred to as a registration id, for
an app and mobile device. This is returned from the notification
service when an app and mobile device are registered with the
notification service.
"""
params = {}
if endpoint_arn is not None:
params['EndpointArn'] = endpoint_arn
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='SetEndpointAttributes',
params=params)
def get_endpoint_attributes(self, endpoint_arn=None):
"""
The `GetEndpointAttributes` retrieves the endpoint attributes
for a device on one of the supported push notification
services, such as GCM and APNS. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type endpoint_arn: string
:param endpoint_arn: EndpointArn for GetEndpointAttributes input.
"""
params = {}
if endpoint_arn is not None:
params['EndpointArn'] = endpoint_arn
return self._make_request(action='GetEndpointAttributes',
params=params)
def _make_request(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb=verb,
path=path, params=params)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
| |
import urllib
import datetime
import logging
import re
import unicodedata
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext as _
import jsonfield.fields
class Gateway(models.Model):
    """
    A Gateway is a sending endpoint, and associated authentication info
    that can be used to send and receive messages.

    Each instance describes one SMS provider: how to build the outgoing
    HTTP request (base_url plus the *_keyword fields), and how to parse
    the provider's responses and status callbacks (the *_format and
    status_* fields).
    """
    name = models.CharField(max_length=128, unique=True)

    # These fields are used in the request to send a message via a gateway.
    base_url = models.URLField()
    # NOTE(review): ``default={}`` is a mutable default shared between all
    # unsaved instances; ``default=dict`` would be safer. Left unchanged.
    settings = jsonfield.fields.JSONField(default={},
        help_text=_(u'A JSON Dictionary of key-value pairs that will be '
            'used for every message. Authorisation credentials should go '
            'in here, for example.'
        ))
    recipient_keyword = models.CharField(max_length=128,
        help_text=_(u'The keyword that is used in the request to identify the recipient number.')
    )
    content_keyword = models.CharField(max_length=128,
        help_text=_(u'The keyword that is used in the request to identify the message content.')
    )
    uuid_keyword = models.CharField(max_length=128, null=True, blank=True,
        help_text=_(u'The keyword used in the request for our message reference id.')
    )

    # These fields are used to parse the response for status/charge info
    success_format = models.CharField(max_length=256, null=True, blank=True,
        help_text=_(u'A regular expression that parses the response. May contain named groups for "gateway_message_id", "status_message" and "status_code".'))
    error_format = models.CharField(max_length=256, null=True, blank=True,
        help_text=_(u'A regular expression that parses an error response. Must contain named group for "status_message".')
    )
    # NOTE(review): ``jsonfield.JSONField`` here vs ``jsonfield.fields.JSONField``
    # above -- presumably aliases of the same class; confirm against the
    # installed jsonfield version. Mutable ``default={}`` again.
    status_mapping = jsonfield.JSONField(default={},
        help_text=_(u"A mapping of returned status codes to our status choices. These will be used to match the success_format string to Unsent/Sent/Failed/Delivered.")
    )
    charge_keyword = models.CharField(max_length=128, null=True, blank=True,
        help_text=_(u'Used in status updates: data matching this field indicates '
            'how many \'credits\' this message cost in the gateway'
        )
    )
    status_msg_id = models.CharField(max_length=128, null=True, blank=True,
        help_text=_(u'The field that contains our message reference id '
            '(see uuid_keyword, above).'
        )
    )
    status_status = models.CharField(max_length=128, null=True, blank=True,
        help_text=_(u'The field that contains the status code, used by status_mapping.')
    )
    status_error_code = models.CharField(max_length=128, null=True, blank=True,
        help_text=_(u'The field that contains the error code. May be the same value '
            'as status_status, if no seperate error code field is used.'
        )
    )
    status_date = models.CharField(max_length=128, null=True, blank=True,
        help_text=_(u'The field that contains the status update date-string. '
            'See status_date_format: that is used by this field for parsing.'
        )
    )
    status_date_format = models.CharField(max_length=128, null=True, blank=True,
        help_text=_(u'Python datetime formatting code representing the format '
            'this gateway uses for delivery time reporting. Leaving this '
            'blank means that a unix-style timestamp is used.'
        )
    )

    # Keywords used to parse inbound reply callbacks from the gateway.
    reply_content = models.CharField(max_length=128, null=True, blank=True)
    reply_sender = models.CharField(max_length=128, null=True, blank=True)
    reply_date = models.CharField(max_length=128, null=True, blank=True)
    reply_date_format = models.CharField(max_length=128, null=True, blank=True,
        default="%Y-%m-%d %H:%M:%S",
    )

    # Optional support for asking the gateway whether a number is reachable.
    check_number_url = models.CharField(max_length=256, null=True, blank=True,
        help_text=_(u'The URL that can be used to check availability of sending to a number'))
    check_number_field = models.CharField(max_length=65, null=True, blank=True,
        help_text=_(u'The keyword that contains the number to check'))
    check_number_response_format = models.CharField(max_length=256, null=True, blank=True,
        help_text=_(u'A regular expression that parses the response. Keys: status, charge'))
    check_number_status_mapping = jsonfield.JSONField(null=True, blank=True)

    # Optional support for querying the account balance at the gateway.
    query_balance_url = models.CharField(max_length=256, null=True, blank=True,
        help_text=_(u'The url path that queries for balance'))
    # NOTE(review): ``default=[]`` is a shared mutable default; prefer
    # ``default=list``. Left unchanged here.
    query_balance_params = jsonfield.fields.JSONField(default=[])
    query_balance_response_format = models.CharField(max_length=128, null=True, blank=True)

    class Meta:
        app_label = 'sms'

    def __unicode__(self):
        # Python 2 style display name (used e.g. by the Django admin).
        return self.name

    def send(self, message):
        """
        Use this gateway to send a message.

        If ``djcelery`` is installed, then we assume they have set up the
        ``celeryd`` server, and we queue for delivery. Otherwise, we will
        send in-process.

        .. note::
            It is strongly recommended to run this out of process,
            especially if you are sending as part of an HttpRequest, as this
            could take ~5 seconds per message that is to be sent.
        """
        # ``settings`` here is django.conf.settings (module-level import),
        # not the model's ``settings`` JSON field.
        if 'djcelery' in settings.INSTALLED_APPS:
            # Deferred import: sms.tasks is only needed when celery is used.
            import sms.tasks
            sms.tasks.SendMessage.delay(message.pk, self.pk)
        else:
            self._send(message)

    def _send(self, message):
        """
        Actually do the work of sending the message. This is in a separate
        method so we can background it if possible.
        """
        assert message.status == "Unsent", "Re-sending SMS Messages not yet supported."
        # We need to store the gateway that was used, so we can match up
        # which gateway a reply has come through.
        message.gateway = self
        # Build up a URL-encoded request.
        raw_data = {}
        if self.settings:
            raw_data.update(**self.settings)
        if message.recipient_number:
            raw_data[self.recipient_keyword] = message.recipient_number
        else:
            raise ValueError("A recipient_number must be supplied")
        # We need to see if this message needs to be sent as unicode.
        # Could be smart and try to see if it is a short enough message.
        # Or look for a preference that says if this user may send unicode
        # messages?
        # For now: decompose accents and strip everything non-ASCII before
        # sending (Python 2 ``unicode``).
        raw_data[self.content_keyword] = unicodedata.normalize(
            'NFKD',
            unicode(message.content)
        ).encode('ascii', 'ignore')
        if self.uuid_keyword:
            assert message.uuid, "Message must have a valid UUID. Has it been saved?"
            raw_data[self.uuid_keyword] = message.uuid
        data = urllib.urlencode(raw_data)
        logging.debug(data)
        logging.debug(self)
        # Now hit the server.
        res = urllib.urlopen(self.base_url, data)
        # Most servers will respond with something, which is only an
        # interim status, which we can get for now, and maybe update later.
        status_msg = res.read()
        logging.debug(status_msg)
        if self.error_format and re.match(self.error_format, status_msg):
            # Explicitly configured error response.
            message.status = "Failed"
            message.status_message = re.match(self.error_format, status_msg).groupdict()['status_message']
            logging.warning(message.status_message)
        elif status_msg.startswith('ERR') or status_msg.startswith('WARN'):
            # Fallback heuristic for gateways without an error_format.
            # NOTE(review): assumes the response contains ': '; otherwise
            # the split below raises IndexError.
            message.status = "Failed"
            message.status_message = status_msg.split(': ')[1]
            logging.warning(message.status_message)
        else:
            # Tentatively mark as sent; success_format groups may refine the
            # status, gateway message id and status message below.
            message.status = "Sent"
            parsed_response = re.match(self.success_format, status_msg).groupdict()
            if 'gateway_message_id' in parsed_response and parsed_response['gateway_message_id']:
                message.gateway_message_id = parsed_response['gateway_message_id'].strip()
            if 'status_code' in parsed_response and parsed_response['status_code']:
                message.status = self.status_mapping.get(parsed_response['status_code'])
            if 'status_message' in parsed_response and parsed_response['status_message']:
                message.status_message = parsed_response['status_message']
            logging.debug("Gateway MSG ID %s" % message.gateway_message_id)
        message.send_date = datetime.datetime.now()
        message.save()
        return message

    def check_availability_to_send(self, number):
        """
        Ask the gateway whether ``number`` can be sent to.

        Returns None when no check_number_url is configured (and, as
        written, also when check_number_response_format is blank);
        otherwise a dict with 'number', 'status' and 'charge'.
        """
        if not self.check_number_url:
            return None
        raw_data = {}
        raw_data.update(**self.settings)
        raw_data[self.check_number_field] = number
        data = urllib.urlencode(raw_data)
        res = urllib.urlopen(self.check_number_url, data)
        res_data = res.read()
        if self.check_number_response_format:
            parsed_response = re.match(self.check_number_response_format, res_data).groupdict()
            # Both 'status' and 'charge' are translated through the same
            # check_number_status_mapping table.
            status = self.check_number_status_mapping.get(parsed_response.get('status', None), None)
            charge = self.check_number_status_mapping.get(parsed_response.get('charge', None), None)
            return {
                'number': number,
                'status': status,
                'charge': charge
            }

    def query_balance(self):
        """
        Query the gateway for the remaining account balance.

        Returns None when no query_balance_url is configured (and, as
        written, also when query_balance_response_format is blank);
        otherwise the parsed 'balance' group from the response.
        """
        if not self.query_balance_url:
            return None
        raw_data = {}
        for field in self.query_balance_params:
            # Each listed parameter is copied from the gateway's settings blob.
            raw_data[field] = self.settings[field]
        data = urllib.urlencode(raw_data)
        res = urllib.urlopen(self.query_balance_url, data)
        res_data = res.read()
        if self.query_balance_response_format:
            parsed_response = re.match(self.query_balance_response_format, res_data).groupdict()
            return parsed_response.get('balance', None)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# FIXME: Uses the selector service, but has no way of indicating to the
# selector service that its services are no longer required.
# This needs resolving.
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
============================
Simple multicast transceiver
============================
A simple component for transmitting and receiving multicast packets.
Remember that multicast is an unreliable connection - packets may be lost,
duplicated or reordered.
Example Usage
-------------
Send a file to, and receive data from multicast group address 1.2.3.4 on
port 1000 with no guarantees of reliability, integrity or packet ordering::
Pipeline( RateControlledFileReader("myfile", rate=100000),
Multicast_transceiver("0.0.0.0", 0, "1.2.3.4", 1000),
).activate()
Pipeline( Multicast_transceiver("0.0.0.0", 1000, "1.2.3.4", 0),
ConsoleEchoer()
).activate()
Or::
Pipeline( RateControlledFileReader("myfile", rate=100000),
Multicast_transceiver("0.0.0.0", 1000, "1.2.3.4", 1000),
ConsoleEchoer()
).activate()
The data emitted by Multicast_transceiver (and displayed by ConsoleEchoer) is of
the form (source_address, data).
Behaviour
---------
Data sent to the component's "inbox" inbox is sent to the multicast group.
Data received from the multicast group is emitted as a tuple:
(source_addr, data) where data is a string of the received data.
This component will terminate if a shutdownMicroprocess or producerFinished
message is sent to its "control" inbox. This message is forwarded onto the CSA.
Multicast_transceiver will then wait for the CSA to terminate. It then sends its
own shutdownMicroprocess message out of the "signal" outbox.
Multicast groups do not 'shut down', so this component will not usually emit any
signals on its "signal" outbox. However if, for some reason, there is a socket
error, a shutdownMicroprocess message will be sent out the "signal" outbox and
this component will then immediately terminate.
Why a transceiver component?
----------------------------
Listens for packets in the given multicast group. Any data received is
sent to the receiver's outbox. The logic here is likely to be not quite
ideal. When complete though, this will be preferable over the sender and
receiver components since it models what multicast really is rather than
what people tend to think it is.
How does it work?
-----------------
Multicast_transceiver opens a socket connection to the specified server on the
specified port. Data received over the connection appears at the component's
"outbox" outbox as strings. Data can be sent as strings by sending it to the
"inbox" inbox.
An optional delay (between component activation and attempting to connect) can
be specified. The default is no delay.
It creates a ConnectedSocketAdapter (CSA) to handle the socket connection and
registers it with a selectorComponent so it is notified of incoming data. The
selectorComponent is obtained by calling
selectorComponent.getSelectorService(...) to look it up with the local
Coordinating Assistant Tracker (CAT).
Multicast_transceiver wires itself to the "CreatorFeedback" outbox of the CSA.
It also wires its "inbox" inbox to pass data straight through to the CSA's
"inbox" inbox, and its "outbox" outbox to pass through data from the CSA's
"outbox" outbox.
Socket errors (after the connection has been successfully established) may be
sent to the "signal" outbox.
This component will terminate if the CSA sends a socketShutdown message to its
"CreatorFeedback" outbox.
This component will terminate if a shutdownMicroprocess or producerFinished
message is sent to its "control" inbox. This message is forwarded onto the CSA.
Multicast_transceiver will then wait for the CSA to terminate. It then sends its
own shutdownMicroprocess message out of the "signal" outbox.
"""
import socket
import errno
import Axon
from Axon.util import Finality
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Axon.Ipc import newComponent, status
from Kamaelia.IPC import socketShutdown, newCSA
from Kamaelia.IPC import newReader, newWriter
from Kamaelia.IPC import removeReader, removeWriter
#from Kamaelia.Internet.ConnectedSocketAdapter import ConnectedSocketAdapter
from ConnectedSocketAdapter import ConnectedSocketAdapter
#from Kamaelia.Internet.Selector import Selector
from Selector import Selector
class Multicast_transceiver(Axon.Component.component):
"""\
Multicast_transceiver(local_addr,local_port,remote_addr,remote_port) -> new Multicast_transceiver component.
Keyword arguments::
- local_addr -- address of the local interface to send to/receive from, usually "0.0.0.0"
- local_port -- port number to receive on
- remote_addr -- address of multicast group
- remote_port -- port number to send to
"""
Inboxes = { "inbox" : "data to send to the socket",
"_socketFeedback" : "notifications from the ConnectedSocketAdapter",
"control" : "Shutdown signalling"
}
Outboxes = { "outbox" : "data received from the socket",
"signal" : "socket errors",
"_selectorSignal" : "For registering and deregistering ConnectedSocketAdapter components with a selector service",
}
Usescomponents=[ConnectedSocketAdapter] # List of classes used.
def __init__(self,local_addr, local_port, remote_addr, remote_port):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(Multicast_transceiver, self).__init__()
self.local = (local_addr, local_port)
self.remote = (remote_addr, remote_port)
self.CSA = None
self.sock = None
def main(self):
"""Main loop."""
for v in self.runClient():
yield v
if (self.sock is not None) and (self.CSA is not None):
self.send(removeReader(self.CSA, self.sock), "_selectorSignal")
self.send(removeWriter(self.CSA, self.sock), "_selectorSignal")
def setupCSA(self, sock):
"""\
setupCSA(sock) -> new ConnectedSocketAdapter component
Creates a ConnectedSocketAdapter component for the socket, and wires up to
it. Also sends the CSA to the "selector" service.
"""
selectorService, selectorShutdownService, newSelector = Selector.getSelectorServices(self.tracker)
if newSelector:
self.addChildren(newSelector)
CSA = ConnectedSocketAdapter(sock, selectorService, sendTo=self.remote) # self.createConnectedSocket(sock)
self.addChildren(CSA)
self.link((self, "_selectorSignal"),selectorService)
self.link((CSA, "CreatorFeedback"),(self,"_socketFeedback"))
self.link((CSA, "outbox"), (self, "outbox"), passthrough=2)
self.link((self, "inbox"), (CSA, "inbox"), passthrough=1)
self.link((self, "control"), (CSA, "control"), passthrough=1) # propagate shutdown msgs
self.send(newReader(CSA, ((CSA, "ReadReady"), sock)), "_selectorSignal")
self.send(newWriter(CSA, ((CSA, "SendReady"), sock)), "_selectorSignal")
self.CSA = CSA # We need this for shutdown later
return self.childComponents()
def waitCSAClose(self):
"""Returns True if a socketShutdown message is received on "_socketFeedback" inbox."""
if self.dataReady("_socketFeedback"):
message = self.recv("_socketFeedback")
if isinstance(message, socketShutdown):
return False
return True
def safeConnect(self, sock):
"""\
Connect to socket and handle possible errors that may occur.
Returns True if successful, or False on failure. Unhandled errors are raised
as exceptions.
"""
try:
sock.bind(self.local) # Receive from server on this port
# EALREADY
# The socket is non-blocking and a previous connection
# attempt has not yet been completed.
self.connecting=0
return True
except socket.error, socket.msg:
(errorno, errmsg) = socket.msg.args
if errorno==errno.EALREADY:
# The socket is non-blocking and a previous connection attempt has not yet been completed
# We handle this by allowing the code to come back and repeatedly retry
# connecting. This is a valid, if brute force approach.
assert(self.connecting==1)
return False
if errorno==errno.EINPROGRESS or errorno==errno.EWOULDBLOCK:
#The socket is non-blocking and the connection cannot be completed immediately.
# We handle this by allowing the code to come back and repeatedly retry
# connecting. Rather brute force.
self.connecting=1
return False # Not connected should retry until no error
if errorno == errno.EISCONN:
# This is a windows error indicating the connection has already been made.
self.connecting = 0 # as with the no exception case.
return True
# Anything else is an error we don't handle
raise socket.msg
   def runClient(self,sock=None):
      """
      Main generator loop: create the UDP socket, bind it via safeConnect, join
      the multicast group given by self.remote, hand the socket over to a CSA
      via setupCSA, then idle until the CSA reports shutdown.
      """
      # The various numbers yielded here indicate progress through the function, and
      # nothing else specific.
      try:
         sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
         yield 0.3
         self.sock = sock # We need this for shutdown later
         try:
            sock.setblocking(0); yield 0.6
            try:
               while not self.safeConnect(sock):
                  if self.shutdown():
                     return
                  yield 1
               sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
               status = sock.setsockopt(socket.IPPROTO_IP,
                                        socket.IP_ADD_MEMBERSHIP,
                                        socket.inet_aton(self.remote[0]) + socket.inet_aton("0.0.0.0"))
               yield newComponent(*self.setupCSA(sock))
               while self.waitCSAClose():
                  self.pause()
                  yield 2
               raise Finality
            except Exception, x:
               result = sock.shutdown(2) ; yield 3
               raise x # XXXX If X is not finality, an error message needs to get sent _somewhere_ else
               # The logical place to send the error is to the signal outbox
         except Exception, x:
            sock.close() ; yield 4,x # XXXX If X is not finality, an error message needs to get sent _somewhere_ else
            raise x
      except Finality:
         yield 5
      except socket.error, e:
         # We now do the flipside of setupCSA, whether we had an error or not
         # A safe error relates to a disconnected server, and unsafe error is generally
         # bad. However either way, it's gone, let's let the person using this
         # component know, shutdown everything, and get outta here.
         #
         pass
      self.send(shutdownMicroprocess(self), "signal")
      # self.send(e, "signal")
      # "TCPC: Exiting run client"
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (producerFinished,shutdownMicroprocess)):
return True
return False
# Components this module publishes for Kamaelia's component registry/docs tooling.
__kamaelia_components__ = ( Multicast_transceiver, )

if __name__ =="__main__":
   # No self-test for this module.
   pass
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger("luigi.server")
class Scheduler(object):
    """
    Abstract base class.

    Note that the methods all take string arguments, not Task objects...
    """
    # NOTE: the original closed this docstring with ``"""""`` — the extra ``""``
    # was an accidental empty string literal concatenated onto the docstring.

    # Subclasses must provide these three entry points.
    add_task = NotImplemented
    get_work = NotImplemented
    ping = NotImplemented
# Labels describing the state of a PENDING task's (transitive) dependencies.
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
# Ordered least- to most-severe; '' means "no upstream problem".
UPSTREAM_SEVERITY_ORDER = (
    '',
    UPSTREAM_RUNNING,
    UPSTREAM_MISSING_INPUT,
    UPSTREAM_FAILED,
    UPSTREAM_DISABLED,
)
# Key function for max(): a label's index in the severity order.
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
# Maps a dependency's own status to the upstream label downstream tasks inherit.
STATUS_TO_UPSTREAM_MAP = {
    FAILED: UPSTREAM_FAILED,
    RUNNING: UPSTREAM_RUNNING,
    PENDING: UPSTREAM_MISSING_INPUT,
    DISABLED: UPSTREAM_DISABLED,
}
class scheduler(Config):
    # Configuration section for the central scheduler (read from [scheduler]).
    # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
    # at some point (in particular this would force users to replace all dashes with underscores in the config)
    retry_delay = parameter.FloatParameter(default=900.0)  # seconds before a FAILED task goes back to PENDING
    remove_delay = parameter.FloatParameter(default=600.0)  # seconds before a stakeholder-less task is removed
    worker_disconnect_delay = parameter.FloatParameter(default=60.0)  # seconds of silence before a worker is pruned
    state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')

    # Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
    # These disables last for disable_persist seconds.
    disable_window = parameter.IntParameter(default=3600,
                                            config_path=dict(section='scheduler', name='disable-window-seconds'))
    disable_failures = parameter.IntParameter(default=None,
                                              config_path=dict(section='scheduler', name='disable-num-failures'))
    disable_hard_timeout = parameter.IntParameter(default=None,
                                                  config_path=dict(section='scheduler', name='disable-hard-timeout'))
    disable_persist = parameter.IntParameter(default=86400,
                                             config_path=dict(section='scheduler', name='disable-persist-seconds'))
    max_shown_tasks = parameter.IntParameter(default=100000)  # cap on task_list responses

    prune_done_tasks = parameter.BoolParameter(default=False)

    record_task_history = parameter.BoolParameter(default=False)  # requires sqlalchemy when enabled

    prune_on_get_work = parameter.BoolParameter(default=False)
def fix_time(x):
    """Normalize a timestamp to epoch seconds."""
    # Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled
    # state might store datetime objects rather than epoch floats.
    # Let's remove this function soon.
    return time.mktime(x.timetuple()) if isinstance(x, datetime.datetime) else x
class Failures(object):
    """
    This class tracks the number of failures in a given time window.

    Failures added are marked with the current timestamp, and this class counts
    the number of failures in a sliding time window ending at the present.
    """

    def __init__(self, window):
        """
        Initialize with the given window.

        :param window: how long to track failures for, as a float (number of seconds).
        """
        self.window = window
        self.failures = collections.deque()  # timestamps, oldest first
        self.first_failure_time = None

    def add_failure(self):
        """
        Add a failure event with the current timestamp.
        """
        failure_time = time.time()

        if not self.first_failure_time:
            self.first_failure_time = failure_time

        self.failures.append(failure_time)

    def num_failures(self):
        """
        Return the number of failures in the window.
        """
        min_time = time.time() - self.window

        # Expire timestamps that have slid out of the window.
        while self.failures and fix_time(self.failures[0]) < min_time:
            self.failures.popleft()

        return len(self.failures)

    def clear(self):
        """
        Clear the failure queue and reset the first-failure timestamp.

        Resetting ``first_failure_time`` matters: ``Task.has_excessive_failures``
        uses it for the disable_hard_timeout check, so leaving a stale value
        here would instantly re-disable a task that was just re-enabled.
        """
        self.failures.clear()
        self.first_failure_time = None
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
    """
    Scheduler-internal bookkeeping for one task id: status, dependencies,
    interested workers, and the failure history used for automatic disabling.
    """

    def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
                 params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None,
                 tracking_url=None):
        self.id = task_id
        self.stakeholders = set()  # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
        self.workers = set()  # workers ids that can perform task - task is 'BROKEN' if none of these workers are active
        if deps is None:
            self.deps = set()
        else:
            self.deps = set(deps)
        self.status = status  # PENDING, RUNNING, FAILED or DONE
        self.time = time.time()  # Timestamp when task was first added
        self.retry = None  # epoch time after which a FAILED task may go back to PENDING
        self.remove = None  # epoch time after which the task may be pruned, or None
        self.worker_running = None  # the worker id that is currently running the task or None
        self.time_running = None  # Timestamp when picked up by worker
        self.expl = None  # error explanation supplied by the worker
        self.priority = priority
        self.resources = _get_default(resources, {})
        self.family = family
        self.module = module
        self.params = _get_default(params, {})
        self.disable_failures = disable_failures
        self.disable_hard_timeout = disable_hard_timeout
        self.failures = Failures(disable_window)
        self.tracking_url = tracking_url
        self.scheduler_disable_time = None
        self.runnable = False

    def __repr__(self):
        return "Task(%r)" % vars(self)

    def add_failure(self):
        self.failures.add_failure()

    def has_excessive_failures(self):
        """
        Return True when the task should be DISABLED: either the hard timeout
        since its first failure has elapsed, or the number of failures within
        the sliding window reached the configured limit.
        """
        if (self.failures.first_failure_time is not None and
                self.disable_hard_timeout):
            if (time.time() >= self.failures.first_failure_time +
                    self.disable_hard_timeout):
                return True

        # Guard against disable_failures being None (only the hard timeout
        # configured): without the guard, ``num >= None`` raises TypeError on
        # Python 3 and is always True on Python 2 (disabling on first failure).
        if (self.disable_failures is not None and
                self.failures.num_failures() >= self.disable_failures):
            return True

        return False

    def can_disable(self):
        return (self.disable_failures is not None or
                self.disable_hard_timeout is not None)
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.last_get_work = None
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference, get_work=False):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
if get_work:
self.last_get_work = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_pending_tasks(self, state):
"""
Get PENDING (and RUNNING) tasks for this worker.
You have to pass in the state for optimization reasons.
"""
if len(self.tasks) < state.num_pending_tasks():
return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
self.tasks)
else:
return state.get_pending_tasks()
def is_trivial_worker(self, state):
"""
If it's not an assistant having only tasks that are without
requirements.
We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_pending_tasks(state))
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
class SimpleTaskState(object):
    """
    Keep track of the current state and handle persistence.

    The point of this class is to enable other ways to keep state, eg. by using a database
    These will be implemented by creating an abstract base class that this and other classes
    inherit from.
    """

    def __init__(self, state_path):
        self._state_path = state_path
        self._tasks = {}  # map from id to a Task object
        self._status_tasks = collections.defaultdict(dict)  # status -> {task id -> Task}; kept in sync by set_status
        self._active_workers = {}  # map from id to a Worker object

    def get_state(self):
        # Everything that needs to survive a restart (this is what dump() pickles).
        return self._tasks, self._active_workers

    def set_state(self, state):
        self._tasks, self._active_workers = state

    def dump(self):
        """Pickle current state to self._state_path; failures are logged, not raised."""
        try:
            with open(self._state_path, 'wb') as fobj:
                pickle.dump(self.get_state(), fobj)
        except IOError:
            logger.warning("Failed saving scheduler state", exc_info=1)
        else:
            logger.info("Saved state in %s", self._state_path)

    # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control?
    def load(self):
        """Restore state from self._state_path, applying backwards-compatibility fix-ups."""
        if os.path.exists(self._state_path):
            logger.info("Attempting to load state from %s", self._state_path)
            try:
                with open(self._state_path, 'rb') as fobj:
                    state = pickle.load(fobj)
            except BaseException:
                logger.exception("Error when loading state. Starting from clean slate.")
                return

            self.set_state(state)
            # Rebuild the per-status index from scratch; it is derived data.
            self._status_tasks = collections.defaultdict(dict)
            for task in six.itervalues(self._tasks):
                self._status_tasks[task.status][task.id] = task

            # Convert from old format
            # TODO: this is really ugly, we need something more future-proof
            # Every time we add an attribute to the Worker or Task class, this
            # code needs to be updated

            # Compatibility since 2014-06-02
            for k, v in six.iteritems(self._active_workers):
                if isinstance(v, float):
                    self._active_workers[k] = Worker(worker_id=k, last_active=v)

            # Compatibility since 2015-05-28
            if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)):
                # If you load from an old format where Workers don't contain tasks.
                for k, worker in six.iteritems(self._active_workers):
                    worker.tasks = set()
                for task in six.itervalues(self._tasks):
                    for worker_id in task.workers:
                        self._active_workers[worker_id].tasks.add(task)

            # Compatibility since 2015-04-28
            if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)):
                for t in six.itervalues(self._tasks):
                    t.disable_hard_timeout = None
        else:
            logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)

    def get_active_tasks(self, status=None):
        # Generator over all tasks, or only those with the given status.
        if status:
            for task in six.itervalues(self._status_tasks[status]):
                yield task
        else:
            for task in six.itervalues(self._tasks):
                yield task

    def get_running_tasks(self):
        return six.itervalues(self._status_tasks[RUNNING])

    def get_pending_tasks(self):
        # "Pending" here includes RUNNING tasks, matching Worker.get_pending_tasks.
        return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
                                             for status in [PENDING, RUNNING])

    def num_pending_tasks(self):
        """
        Return how many tasks are PENDING + RUNNING. O(1).
        """
        return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])

    def get_task(self, task_id, default=None, setdefault=None):
        # With setdefault, behaves like dict.setdefault and also indexes the task by status.
        if setdefault:
            task = self._tasks.setdefault(task_id, setdefault)
            self._status_tasks[task.status][task.id] = task
            return task
        else:
            return self._tasks.get(task_id, default)

    def has_task(self, task_id):
        return task_id in self._tasks

    def re_enable(self, task, config=None):
        """Lift a scheduler disable; with a config, also reset the task to FAILED."""
        task.scheduler_disable_time = None
        task.failures.clear()
        if config:
            self.set_status(task, FAILED, config)
            # set_status(FAILED) records a failure; clear it so the task starts fresh.
            task.failures.clear()

    def set_status(self, task, new_status, config=None):
        """Transition a task's status, applying the disable/re-enable rules, and keep the status index in sync."""
        if new_status == FAILED:
            assert config is not None

        if new_status == DISABLED and task.status == RUNNING:
            return

        if task.status == DISABLED:
            if new_status == DONE:
                self.re_enable(task)

            # don't allow workers to override a scheduler disable
            elif task.scheduler_disable_time is not None and new_status != DISABLED:
                return

        if new_status == FAILED and task.can_disable() and task.status != DISABLED:
            task.add_failure()
            if task.has_excessive_failures():
                task.scheduler_disable_time = time.time()
                new_status = DISABLED
                notifications.send_error_email(
                    'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
                    '{task} failed {failures} times in the last {window} seconds, so it is being '
                    'disabled for {persist} seconds'.format(
                        failures=config.disable_failures,
                        task=task.id,
                        window=config.disable_window,
                        persist=config.disable_persist,
                    ))
        elif new_status == DISABLED:
            # A worker-requested disable carries no expiry time.
            task.scheduler_disable_time = None

        self._status_tasks[task.status].pop(task.id)
        self._status_tasks[new_status][task.id] = task
        task.status = new_status

    def fail_dead_worker_task(self, task, config, assistants):
        # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
        if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
            logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
                        "FAILED with retry delay of %rs", task.id, task.worker_running,
                        config.retry_delay)
            task.worker_running = None
            self.set_status(task, FAILED, config)
            task.retry = time.time() + config.retry_delay

    def prune(self, task, config):
        """Apply per-task aging rules; return True when the task should be removed entirely."""
        remove = False

        # Mark tasks with no remaining active stakeholders for deletion
        if not task.stakeholders:
            if task.remove is None:
                logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
                            "task in %s seconds", task.id, task.stakeholders, config.remove_delay)
                task.remove = time.time() + config.remove_delay

        # Re-enable task after the disable time expires
        if task.status == DISABLED and task.scheduler_disable_time is not None:
            if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
                self.re_enable(task, config)

        # Remove tasks that have no stakeholders
        if task.remove and time.time() > task.remove:
            logger.info("Removing task %r (no connected stakeholders)", task.id)
            remove = True

        # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
        if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
            self.set_status(task, PENDING, config)

        return remove

    def inactivate_tasks(self, delete_tasks):
        # The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
        # but with a pluggable state storage, you might very well want to keep some history of
        # older tasks as well. That's why we call it "inactivate" (as in the verb)
        for task in delete_tasks:
            task_obj = self._tasks.pop(task)
            self._status_tasks[task_obj.status].pop(task)

    def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
        """Yield workers, optionally filtered by last-activity / last-get_work thresholds."""
        for worker in six.itervalues(self._active_workers):
            if last_active_lt is not None and worker.last_active >= last_active_lt:
                continue
            # getattr: workers restored from old pickles may lack this attribute.
            last_get_work = getattr(worker, 'last_get_work', None)
            if last_get_work_gt is not None and (
                    last_get_work is None or last_get_work <= last_get_work_gt):
                continue
            yield worker

    def get_assistants(self, last_active_lt=None):
        return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))

    def get_worker_ids(self):
        return self._active_workers.keys()  # only used for unit tests

    def get_worker(self, worker_id):
        # Creates the Worker record on first contact.
        return self._active_workers.setdefault(worker_id, Worker(worker_id))

    def inactivate_workers(self, delete_workers):
        # Mark workers as inactive
        for worker in delete_workers:
            self._active_workers.pop(worker)

        # remove workers from tasks
        for task in self.get_active_tasks():
            task.stakeholders.difference_update(delete_workers)
            task.workers.difference_update(delete_workers)

    def get_necessary_tasks(self):
        """Return ids of tasks (and their deps) that must not be pruned on behalf of assistants."""
        necessary_tasks = set()
        for task in self.get_active_tasks():
            if task.status not in (DONE, DISABLED) or \
                    getattr(task, 'scheduler_disable_time', None) is not None:
                necessary_tasks.update(task.deps)
                necessary_tasks.add(task.id)
        return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
:param config: an object of class "scheduler" or None (in which the global instance will be used)
:param resources: a dict of str->int constraints
:param task_history_override: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
self._worker_requests = {}
def load(self):
self._state.load()
def dump(self):
self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
if task.id not in necessary_tasks and self._state.prune(task, self._config):
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
logger.info("Done pruning task graph")
def update(self, worker_id, worker_reference=None, get_work=False):
"""
Keep track of whenever the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference, get_work=get_work)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, tracking_url=None, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
worker_id = kwargs['worker']
self.update(worker_id)
task = self._state.get_task(task_id, setdefault=self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params))
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if tracking_url is not None or task.status != RUNNING:
task.tracking_url = tracking_url
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if expl is not None:
task.expl = expl
if not (task.status == RUNNING and status == PENDING) or new_deps:
# don't allow re-scheduling of task while it is running, it must either fail or succeed first
if status == PENDING or status != task.status:
# Update the DB only if there was a acctual change, to prevent noise.
# We also check for status == PENDING b/c that's the default value
# (so checking for status != task.status woule lie)
self._update_task_history(task, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = self._retry_time(task, self._config)
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
if runnable:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
task.runnable = runnable
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self, task):
"""
Return worker's rank function for task scheduling.
:return:
"""
return task.priority, -task.time
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def _retry_time(self, task, config):
return time.time() + config.retry_delay
def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs):
# TODO: remove any expired nodes
# Algo: iterate over all nodes, find the highest priority node no dependencies and available
# resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
if self._config.prune_on_get_work:
self.prune()
worker_id = kwargs['worker']
# Return remaining tasks that have no FAILED descendants
self.update(worker_id, {'host': host}, get_work=True)
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
best_task = None
if current_tasks is not None:
ct_set = set(current_tasks)
for task in sorted(self._state.get_running_tasks(), key=self._rank):
if task.worker_running == worker_id and task.id not in ct_set:
best_task = task
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
worker = self._state.get_worker(worker_id)
if worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_pending_tasks(self._state)
used_resources = collections.defaultdict(int)
greedy_workers = dict() # If there's no resources, then they can grab any task
else:
relevant_tasks = self._state.get_pending_tasks()
used_resources = self._used_resources()
activity_limit = time.time() - self._config.worker_disconnect_delay
active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in active_workers)
tasks = list(relevant_tasks)
tasks.sort(key=self._rank, reverse=True)
for task in tasks:
upstream_status = self._upstream_status(task.id, upstream_table)
in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
if task.status == RUNNING and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if best_task:
continue
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.time_running = time.time()
self._update_task_history(best_task, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
def ping(self, **kwargs):
worker_id = kwargs['worker']
self.update(worker_id)
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
while task_stack:
dep_id = task_stack.pop()
if self._state.has_task(dep_id):
dep = self._state.get_task(dep_id)
if dep.status == DONE:
continue
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack = task_stack + [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps]
upstream_status.append('') # to handle empty list
status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
def _serialize_task(self, task_id, include_deps=True):
task = self._state.get_task(task_id)
ret = {
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
'tracking_url': getattr(task, "tracking_url", None),
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps)
return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
for task in self._state.get_active_tasks():
serialized[task.id] = self._serialize_task(task.id)
return serialized
def _recurse_deps(self, task_id, serialized):
if task_id not in serialized:
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.warn('Missing task for id [%s]', task_id)
# try to infer family and params from task_id
try:
family, _, param_str = task_id.rstrip(')').partition('(')
params = dict(param.split('=') for param in param_str.split(', '))
except BaseException:
family, params = '', {}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'priority': 0,
}
else:
serialized[task_id] = self._serialize_task(task_id)
for dep in task.deps:
self._recurse_deps(dep, serialized)
def dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._recurse_deps(task_id, serialized)
return serialized
def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
filter_func = lambda _: True
else:
terms = search.split()
filter_func = lambda t: all(term in t.id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
def inverse_dep_graph(self, task_id, **kwargs):
    """Return the graph of tasks that (transitively) depend on *task_id*."""
    self.prune()
    graph = {}
    if not self._state.has_task(task_id):
        return graph
    self._traverse_inverse_deps(task_id, graph)
    return graph
def _traverse_inverse_deps(self, task_id, serialized):
    """Walk the *reverse* dependency edges starting from *task_id*.

    For every discovered dependent task, its id is appended to the
    ``"deps"`` list of the task it depends on, and the dependent itself is
    serialized with an empty ``"deps"`` list before being queued for
    further traversal.  NOTE(review): the root entry's ``"deps"`` list
    (whatever ``_serialize_task`` put there) is appended to rather than
    reset — presumably intentional; confirm against the serializer.
    """
    stack = [task_id]
    serialized[task_id] = self._serialize_task(task_id)
    while len(stack) > 0:
        curr_id = stack.pop()
        # Scan all active tasks for ones that list curr_id as a dependency.
        for task in self._state.get_active_tasks():
            if curr_id in task.deps:
                # record the inverse edge curr_id <- task.id
                serialized[curr_id]["deps"].append(task.id)
                if task.id not in serialized:
                    serialized[task.id] = self._serialize_task(task.id)
                    serialized[task.id]["deps"] = []
                    stack.append(task.id)
def task_search(self, task_str, **kwargs):
    """
    Query for a subset of tasks by task_id.

    Returns a mapping of status -> {task_id: serialized task} for every
    active task whose id contains *task_str*.
    """
    self.prune()
    result = collections.defaultdict(dict)
    for task in self._state.get_active_tasks():
        if task_str in task.id:
            result[task.status][task.id] = self._serialize_task(task.id, False)
    return result
def re_enable_task(self, task_id):
    """Re-enable a task the scheduler disabled.

    Returns the task's serialized form, or an empty dict when nothing was
    re-enabled.
    """
    task = self._state.get_task(task_id)
    if not (task and task.status == DISABLED and task.scheduler_disable_time):
        return {}
    self._state.re_enable(task, self._config)
    return self._serialize_task(task_id)
def fetch_error(self, task_id, **kwargs):
    """Return the stored error explanation for *task_id* (empty if unknown)."""
    if not self._state.has_task(task_id):
        return {"taskId": task_id, "error": ""}
    return {"taskId": task_id, "error": self._state.get_task(task_id).expl}
def _update_task_history(self, task, status, host=None):
    """Forward a task status transition to the task-history backend.

    Any failure is logged and swallowed so that history bookkeeping can
    never take the scheduler down.
    """
    try:
        if status in (DONE, FAILED):
            self._task_history.task_finished(task, status == DONE)
        elif status == PENDING:
            self._task_history.task_scheduled(task)
        elif status == RUNNING:
            self._task_history.task_started(task, host)
    except BaseException:
        logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
    """The task-history backend instance."""
    # Used by server.py to expose the calls
    return self._task_history
| |
import os

"""Constants"""
# Keep a reference to the builtin open(); the fs.open() wrapper defined
# further down shadows the builtin inside this module.
OPEN_FUNC = open
# Platform path separator ('/' on POSIX, '\\' on Windows).
sep = os.sep

try:
    # Python 2: types.ListType aliases the builtin list type.
    import types
    LIST_TYPE = types.ListType
except Exception:
    # Python 3 removed types.ListType; fall back to the builtin.
    LIST_TYPE = list
"""Helper Functions"""
def _is_list(e):
    """Return True when *e* is a list."""
    return isinstance(e, LIST_TYPE)
def _to_list(e):
    """Wrap *e* in a list unless it already is one."""
    if _is_list(e):
        return e
    return [e]
"""Filesystem Methods"""
def isfile(path, **kwargs):
    """Check if *path* is a regular file."""
    from os import path as _p
    return _p.isfile(path, **kwargs)
def isdir(path, **kwargs):
    """Check if *path* is a directory."""
    from os import path as _p
    return _p.isdir(path, **kwargs)
def rename(oldPath, newPath, **kwargs):
    """Rename (move) the file *oldPath* to *newPath*."""
    from os import rename as _rename
    return _rename(oldPath, newPath, **kwargs)
def copy(srcPath, destPath):
    """Copy the file *srcPath* to *destPath*.

    Returns the destination path, as ``shutil.copy`` does.
    """
    # BUG FIX: this imported the non-existent module "shutils" (ImportError
    # on every call) while the call below used "shutil".
    import shutil
    return shutil.copy(srcPath, destPath)
def truncate(path, **kwargs):
    """Remove all files and directories
    from a directory *path*.

    Only direct children are enumerated (directories are removed
    recursively via ``rmdir``).  ``kwargs`` is currently ignored.
    """
    # ``list`` here is fs.list (the file generator below), not the builtin.
    rmfiles(list(path))
    rmdirs(listdirs(path))
def chdir(path, **kwargs):
    """Change the current working directory to *path*."""
    from os import chdir as _chdir
    return _chdir(path, **kwargs)
def chown(path, user=None, group=None):
    """Change the ownership of *path*.

    A falsy *user*/*group* leaves the respective id unchanged (-1).
    POSIX only: resolves names via the pwd/grp modules.
    """
    import os
    import pwd
    import grp
    uid = -1
    if user:
        uid = pwd.getpwnam(user).pw_uid
    gid = -1
    if group:
        gid = grp.getgrnam(group).gr_gid
    return os.chown(path, uid, gid)
def chmod(path, mode):
    """Change the permission bits of *path* to *mode*."""
    # FIX: the previous version also called os.stat(path) and discarded the
    # result; that dead statement (and the unused ``stat`` import) is gone.
    import os
    return os.chmod(path, mode)
def link(srcPath, destPath):
    """Create a hard link at *destPath* pointing to *srcPath*."""
    from os import link as _link
    return _link(srcPath, destPath)
def symlink(srcPath, destPath):
    """Create a symbolic link at *destPath* pointing to *srcPath*."""
    from os import symlink as _symlink
    return _symlink(srcPath, destPath)
def stat(path):
    """Return the ``os.stat_result`` for *path*."""
    from os import stat as _stat
    return _stat(path)
def ctime(path):
    """Platform dependent; time of most recent metadata change on Unix,
    or the time of creation on Windows."""
    import os
    return os.stat(path).st_ctime
def atime(path):
    """Return the time of most recent access of *path*."""
    import os
    return os.stat(path).st_atime
def mtime(path):
    """Return the time of most recent content modification of *path*."""
    import os
    return os.stat(path).st_mtime
def mode(path):
    """Return the raw mode bits (``st_mode``) of *path*."""
    import os
    return os.stat(path).st_mode
def abspath(path, **kwargs):
    """Return the absolute version of *path*."""
    from os import path as _p
    return _p.abspath(path, **kwargs)
def normalize(path, **kwargs):
    """Return the normalized form of *path* (collapses '//', '.', etc.)."""
    from os import path as _p
    return _p.normpath(path, **kwargs)
def rm(path, **kwargs):
    """Remove (unlink) the file *path*."""
    from os import unlink as _unlink
    return _unlink(path, **kwargs)
def unlink(*args, **kwargs):
    """Unix-style alias for :func:`rm`."""
    return rm(*args, **kwargs)
def rmdir(path, recursive=True, **kwargs):
    """Remove the directory *path*.

    With ``recursive`` (the default) the whole tree is deleted via
    ``shutil.rmtree``; otherwise the directory must be empty.
    """
    if recursive:
        import shutil
        return shutil.rmtree(path, **kwargs)
    # BUG FIX: the non-recursive branch called the non-existent
    # ``os.remdir``; the correct function is ``os.rmdir``.
    import os
    return os.rmdir(path, **kwargs)
def rmfiles(paths, **kwargs):
    """Remove every file in the iterable *paths* (see :func:`rm`)."""
    for target in paths:
        rm(target, **kwargs)
def rmdirs(paths, **kwargs):
    """Remove an array of directories *paths* (see :func:`rmdir`)."""
    for p in paths:
        rmdir(p, **kwargs)
def mkdir(path, recursive=True, **kwargs):
    """Unix equivalent *mkdir*: create *path*, including missing parents
    when ``recursive`` (the default)."""
    import os
    if not recursive:
        os.mkdir(path, **kwargs)
    else:
        os.makedirs(path, **kwargs)
def touch(path):
    """Unix equivalent *touch*: create *path* if missing, otherwise bump
    its timestamps.
    @src: http://stackoverflow.com/a/1158096"""
    import os
    try:
        handle = OPEN_FUNC(path, 'a+')
        handle.close()
    except IOError:
        # e.g. no write permission on the file: just update the times
        os.utime(path, None)
def exists(path, **kwargs):
    """Check whether a file or directory exists at *path*."""
    from os import path as _p
    return _p.exists(path, **kwargs)
def access(path, mode=None, **kwargs):
    """Check whether the calling process can access *path*.

    *mode* defaults to a pure existence test (``os.F_OK``); pass
    ``os.R_OK``/``os.W_OK``/``os.X_OK`` (or a combination) to test
    permissions.  FIX: this used to be an unimplemented stub that always
    returned None; it now delegates to ``os.access`` and returns a bool.
    """
    import os
    if mode is None:
        mode = os.F_OK
    return os.access(path, mode, **kwargs)
def list(path='.'):
    """Yield every file directly inside *path*.

    Intentionally shadows the builtin ``list`` within this module.  Yields
    bare names when *path* is '.', otherwise names joined with *path*.
    """
    import os
    for entry in os.listdir(path):
        full = join(path, entry)
        if isfile(full):
            yield entry if path == '.' else full
def listdirs(path='.'):
    """Yield every directory directly inside *path*.

    Yields bare names when *path* is '.', otherwise names joined with
    *path*.
    """
    import os
    for entry in os.listdir(path):
        full = join(path, entry)
        if isdir(full):
            yield entry if path == '.' else full
def find(pattern, path='.', exclude=None, recursive=True):
    """Find files that match *pattern* in *path*.

    *pattern* and *exclude* may each be a single glob or a list of globs
    (matched with :mod:`fnmatch`).  Yields absolute file paths.
    """
    import fnmatch
    import os
    if recursive:
        for root, dirnames, filenames in os.walk(path):
            for pat in _to_list(pattern):
                for filename in fnmatch.filter(filenames, pat):
                    filepath = join(abspath(root), filename)
                    for excl in _to_list(exclude):
                        if excl and fnmatch.fnmatch(filepath, excl):
                            break
                    else:
                        yield filepath
    else:
        # BUG FIX: this branch used to glob over fs.list(path), which yields
        # names already joined with *path*; joining those with abspath(path)
        # again produced broken paths for any path other than '.'.  Glob
        # over the bare file names instead.
        filenames = [f for f in os.listdir(path)
                     if os.path.isfile(os.path.join(path, f))]
        for pat in _to_list(pattern):
            for filename in fnmatch.filter(filenames, pat):
                filepath = join(abspath(path), filename)
                for excl in _to_list(exclude):
                    if excl and fnmatch.fnmatch(filepath, excl):
                        break
                else:
                    yield filepath
def finddirs(pattern, path='.', exclude=None, recursive=True):
    """Find directories that match *pattern* in *path*.

    *pattern* and *exclude* may each be a single glob or a list of globs
    (matched with :mod:`fnmatch`).  Yields absolute directory paths.
    """
    import fnmatch
    import os
    if recursive:
        for root, dirnames, filenames in os.walk(path):
            for pat in _to_list(pattern):
                for dirname in fnmatch.filter(dirnames, pat):
                    dirpath = join(abspath(root), dirname)
                    for excl in _to_list(exclude):
                        if excl and fnmatch.fnmatch(dirpath, excl):
                            break
                    else:
                        yield dirpath
    else:
        # BUG FIX: mirror of find() — glob over bare directory names instead
        # of listdirs(path), whose entries are already joined with *path*.
        dirnames = [d for d in os.listdir(path)
                    if os.path.isdir(os.path.join(path, d))]
        for pat in _to_list(pattern):
            for dirname in fnmatch.filter(dirnames, pat):
                dirpath = join(abspath(path), dirname)
                for excl in _to_list(exclude):
                    if excl and fnmatch.fnmatch(dirpath, excl):
                        break
                else:
                    yield dirpath
def open(path, mode='r', **kwargs):
    """Open the file *path* and return the file object.

    Shadows the builtin ``open`` inside this module; the builtin itself is
    kept in ``OPEN_FUNC``.
    """
    return OPEN_FUNC(path, mode, **kwargs)
def write(path, content, encoding="UTF-8", append=False, raw=False):
    """Write *content* to the file *path*.

    *content* is encoded with *encoding* unless *raw* is set, in which case
    it must be a file-like object whose bytes are copied verbatim.  With
    *append* the file is opened in append mode instead of being truncated.
    """
    file_mode = 'ab' if append else 'wb'
    with OPEN_FUNC(path, file_mode) as handle:
        if not raw:
            handle.write(content.encode(encoding))
        else:
            import shutil
            shutil.copyfileobj(content, handle)
def read(path, encoding="UTF-8"):
    """Read the file *path* and return its content decoded via *encoding*."""
    with OPEN_FUNC(path, 'rb') as handle:
        raw_bytes = handle.read()
    return raw_bytes.decode(encoding)
def get(path):
    """Unpickle and return an object stored in the file *path*."""
    try:
        # Python 2: prefer the faster C implementation
        import cPickle as pickle
    except ImportError:
        # FIX: was a bare ``except:`` which would also swallow e.g.
        # KeyboardInterrupt; only an ImportError is expected here.
        import pickle
    with open(path, 'rb') as file:
        return pickle.load(file)
def put(path, obj):
    """Pickle *obj* into the file *path*."""
    try:
        # Python 2: prefer the faster C implementation
        import cPickle as pickle
    except ImportError:
        # FIX: was a bare ``except:``; only an ImportError is expected here.
        import pickle
    with open(path, 'wb') as file:
        return pickle.dump(obj, file)
def join(*args, **kwargs):
    """Join parts of a path together.

    The parts may also be given as a single list in the first positional
    argument.
    """
    import os.path
    first = args[0]
    if _is_list(first):
        return os.path.join(*first)
    return os.path.join(*args, **kwargs)
def cwd():
    """Return the current working directory."""
    from os import getcwd
    return getcwd()
def home():
    """Return the current user's home directory."""
    import os.path
    return os.path.expanduser("~")
def extname(path, **kwargs):
    """Return the extension of *path*, including the leading dot."""
    import os.path
    return os.path.splitext(path, **kwargs)[1]
def basename(path, ext=""):
    """Return the file base name of *path*.

    *ext* (a suffix string) is stripped from the result; pass ``ext=False``
    to strip the real extension instead.  Note: stripping uses
    ``str.replace`` and so removes every occurrence of the suffix.
    """
    import os.path
    name = os.path.basename(path)
    if ext is False:
        return name.replace(extname(path), "")
    return name.replace(ext, "")
def dirname(path):
    """Return the directory component of *path*."""
    from os import path as _p
    return _p.dirname(path)
def add_suffix(path, suffix=""):
    """Insert *suffix* between the base name and the extension of *path*."""
    stem = basename(path, ext=False)
    return join(dirname(path), stem + suffix + extname(path))
def add_prefix(path, prefix=""):
    """Prepend *prefix* to the file name of *path*."""
    return join(dirname(path), prefix + basename(path))
def addpath(path):
    """Add *path* to ``sys.path`` so modules there become importable.

    Inserted at index 1 (keeping the script directory first); raises
    ``ValueError`` when *path* does not exist.
    """
    import sys
    if exists(path):
        sys.path.insert(1, path)
    else:
        raise ValueError('Path %s does not exist' % path)
""" Aliases """
def append(*args, **kwargs):
    """Alias for ``fs.write(..., append=True)``."""
    return write(*args, append=True, **kwargs)
def filename(*args, **kwargs):
    """Alias for :func:`basename`."""
    return basename(*args, **kwargs)
def extension(*args, **kwargs):
    """Alias for :func:`extname`."""
    return extname(*args, **kwargs)
def cd(*args, **kwargs):
    """Alias for :func:`chdir`."""
    return chdir(*args, **kwargs)
| |
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
import sys
import os
import traceback
import logging
import traitlets.log
from jupyter_core.application import JupyterApp
from textwrap import dedent
from tornado.log import LogFormatter
from traitlets import Unicode, List, Bool, Instance, default
from traitlets.config.application import catch_config_error
from traitlets.config.loader import Config
from nbgrader.exchange import ExchangeFactory
from ..coursedir import CourseDirectory
from ..auth import Authenticator
from .. import preprocessors
from .. import plugins
from .. import exchange
from .. import converters
from traitlets.traitlets import MetaHasTraits
from typing import List as TypingList
from io import StringIO
from typing import Any
# Command-line aliases shared by all nbgrader apps: maps ``--name=value``
# options to the traitlets they configure.
nbgrader_aliases = {
    'log-level' : 'Application.log_level',
    'student': 'CourseDirectory.student_id',
    'assignment': 'CourseDirectory.assignment_id',
    'notebook': 'CourseDirectory.notebook_id',
    'db': 'CourseDirectory.db_url',
    'course-dir': 'CourseDirectory.root'
}
# Boolean command-line flags shared by all nbgrader apps.
nbgrader_flags = {
    'debug': (
        {'Application' : {'log_level' : 'DEBUG'}},
        "set log level to DEBUG (maximize logging output)"
    ),
    'quiet': (
        {'Application' : {'log_level' : 'CRITICAL'}},
        "set log level to CRITICAL (minimize logging output)"
    ),
}
def format_excepthook(etype, evalue, tb):
    """Print the traceback, then point the user at the nbgrader issue tracker."""
    traceback.print_exception(etype, evalue, tb)
    print(dedent(
        """
        If you suspect this is a nbgrader bug, please report it at:
            https://github.com/jupyter/nbgrader/issues
        """
    ), file=sys.stderr)
class NbGrader(JupyterApp):
    """A base class for all the nbgrader apps."""

    # Command-line aliases/flags shared across the app family (module level).
    aliases = nbgrader_aliases
    flags = nbgrader_flags

    _log_formatter_cls = LogFormatter

    @default("log_level")
    def _log_level_default(self) -> int:
        return logging.INFO

    @default("log_datefmt")
    def _log_datefmt_default(self) -> str:
        return "%Y-%m-%d %H:%M:%S"

    @default("log_format")
    def _log_format_default(self) -> str:
        return "%(color)s[%(name)s | %(levelname)s]%(end_color)s %(message)s"

    logfile = Unicode(
        "",
        help=dedent(
            """
            Name of the logfile to log to. By default, log output is not written
            to any file.
            """
        )
    ).tag(config=True)

    def init_logging(self,
                     handler_class: type,
                     handler_args: TypingList[StringIO],
                     color: bool = True,
                     subapps: bool = False) -> None:
        """Attach a freshly-formatted log handler; optionally recurse into
        the active subapp so it logs through the same handler type."""
        handler = handler_class(*handler_args)
        if color:
            log_format = self.log_format
        else:
            # strip the color placeholders for non-terminal handlers (files)
            log_format = self.log_format.replace("%(color)s", "").replace("%(end_color)s", "")
        _formatter = self._log_formatter_cls(
            fmt=log_format,
            datefmt=self.log_datefmt)
        handler.setFormatter(_formatter)
        self.log.addHandler(handler)
        if subapps and self.subapp:
            self.subapp.init_logging(handler_class, handler_args, color=color, subapps=subapps)

    def deinit_logging(self) -> None:
        """Close and detach every handler except the first (default) one."""
        if len(self.log.handlers) > 1:
            for handler in self.log.handlers[1:]:
                handler.close()
                self.log.removeHandler(handler)

    coursedir = Instance(CourseDirectory, allow_none=True)
    authenticator = Instance(Authenticator, allow_none=True)
    exchange = Instance(ExchangeFactory, allow_none=True)
    verbose_crash = Bool(False)

    # The classes added here determine how configuration will be documented
    classes = List()

    @default("classes")
    def _classes_default(self) -> TypingList[MetaHasTraits]:
        return [ExchangeFactory, NbGrader, CourseDirectory]

    def all_configurable_classes(self) -> TypingList[MetaHasTraits]:
        """Get a list of all configurable classes for nbgrader
        """
        # Call explicitly the method on this class, to avoid infinite recursion
        # when a subclass calls this method in _classes_default().
        classes = NbGrader._classes_default(self)
        # include the coursedirectory
        classes.append(CourseDirectory)
        # include the authenticator
        classes.append(Authenticator)
        # include all the apps that have configurable options
        for _, (app, _) in self.subcommands.items():
            if len(app.class_traits(config=True)) > 0:
                classes.append(app)
        # include plugins that have configurable options
        for pg_name in plugins.__all__:
            pg = getattr(plugins, pg_name)
            if pg.class_traits(config=True):
                classes.append(pg)
        # include all preprocessors that have configurable options
        for pp_name in preprocessors.__all__:
            pp = getattr(preprocessors, pp_name)
            if len(pp.class_traits(config=True)) > 0:
                classes.append(pp)
        # include all the exchange actions
        for ex_name in exchange.__all__:
            ex = getattr(exchange, ex_name)
            if hasattr(ex, "class_traits") and ex.class_traits(config=True):
                classes.append(ex)
        # include all the default exchange actions
        for ex_name in exchange.default.__all__:
            ex = getattr(exchange, ex_name)
            if hasattr(ex, "class_traits") and ex.class_traits(config=True):
                classes.append(ex)
        # include all the converters
        for ex_name in converters.__all__:
            ex = getattr(converters, ex_name)
            if hasattr(ex, "class_traits") and ex.class_traits(config=True):
                classes.append(ex)
        return classes

    @default("config_file_name")
    def _config_file_name_default(self) -> str:
        return u'nbgrader_config'

    def _load_config(self, cfg: Config, **kwargs: Any) -> None:
        """Load configuration, migrating every legacy section/option name to
        its current home (with a warning) before delegating to the parent."""
        # NbGraderConfig -> NbGrader
        if 'NbGraderConfig' in cfg:
            self.log.warning(
                "Use NbGrader in config, not NbGraderConfig. Outdated config:\n%s",
                '\n'.join(
                    'NbGraderConfig.{key} = {value!r}'.format(key=key, value=value)
                    for key, value in cfg.NbGraderConfig.items()
                )
            )
            cfg.NbGrader.merge(cfg.NbGraderConfig)
            del cfg.NbGraderConfig
        # BasicConfig -> NbGrader
        if 'BasicConfig' in cfg:
            self.log.warning(
                "Use NbGrader in config, not BasicConfig. Outdated config:\n%s",
                '\n'.join(
                    'BasicConfig.{key} = {value!r}'.format(key=key, value=value)
                    for key, value in cfg.BasicConfig.items()
                )
            )
            cfg.NbGrader.merge(cfg.BasicConfig)
            del cfg.BasicConfig
        # BaseNbGraderApp -> NbGrader
        if 'BaseNbGraderApp' in cfg:
            self.log.warning(
                "Use NbGrader in config, not BaseNbGraderApp. Outdated config:\n%s",
                '\n'.join(
                    'BaseNbGraderApp.{key} = {value!r}'.format(key=key, value=value)
                    for key, value in cfg.BaseNbGraderApp.items()
                )
            )
            cfg.NbGrader.merge(cfg.BaseNbGraderApp)
            del cfg.BaseNbGraderApp
        # BaseApp -> NbGrader
        if 'BaseApp' in cfg:
            self.log.warning(
                "Use NbGrader in config, not BaseApp. Outdated config:\n%s",
                '\n'.join(
                    'BaseApp.{key} = {value!r}'.format(key=key, value=value)
                    for key, value in cfg.BaseApp.items()
                )
            )
            cfg.NbGrader.merge(cfg.BaseApp)
            del cfg.BaseApp
        # individual NbGrader options that moved to CourseDirectory
        coursedir_options = [
            ("student_id", "student_id"),
            ("assignment_id", "assignment_id"),
            ("notebook_id", "notebook_id"),
            ("directory_structure", "directory_structure"),
            ("source_directory", "source_directory"),
            ("release_directory", "release_directory"),
            ("submitted_directory", "submitted_directory"),
            ("autograded_directory", "autograded_directory"),
            ("feedback_directory", "feedback_directory"),
            ("solution_directory", "solution_directory"),
            ("db_url", "db_url"),
            ("course_directory", "root"),
            ("ignore", "ignore")
        ]
        for old_opt, new_opt in coursedir_options:
            if old_opt in cfg.NbGrader:
                self.log.warning("Outdated config: use CourseDirectory.{} rather than NbGrader.{}".format(new_opt, old_opt))
                setattr(cfg.CourseDirectory, new_opt, cfg.NbGrader[old_opt])
                delattr(cfg.NbGrader, old_opt)
        if "course_id" in cfg.NbGrader:
            self.log.warning("Outdated config: use CourseDirectory.course_id rather than NbGrader.course_id")
            cfg.CourseDirectory.course_id = cfg.NbGrader.course_id
            del cfg.NbGrader.course_id
        if "course_id" in cfg.Exchange:
            self.log.warning("Outdated config: use CourseDirectory.course_id rather than Exchange.course_id")
            cfg.CourseDirectory.course_id = cfg.Exchange.course_id
            del cfg.Exchange.course_id
        # individual TransferApp options that moved to Exchange
        exchange_options = [
            ("timezone", "timezone"),
            ("timestamp_format", "timestamp_format"),
            ("exchange_directory", "root"),
            ("cache_directory", "cache")
        ]
        for old_opt, new_opt in exchange_options:
            if old_opt in cfg.TransferApp:
                self.log.warning("Outdated config: use Exchange.{} rather than TransferApp.{}".format(new_opt, old_opt))
                setattr(cfg.Exchange, new_opt, cfg.TransferApp[old_opt])
                delattr(cfg.TransferApp, old_opt)
        # whatever is left of TransferApp -> Exchange
        if 'TransferApp' in cfg and cfg.TransferApp:
            self.log.warning(
                "Use Exchange in config, not TransferApp. Outdated config:\n%s",
                '\n'.join(
                    'TransferApp.{key} = {value!r}'.format(key=key, value=value)
                    for key, value in cfg.TransferApp.items()
                )
            )
            cfg.Exchange.merge(cfg.TransferApp)
            del cfg.TransferApp
        # BaseNbConvertApp -> BaseConverter
        if 'BaseNbConvertApp' in cfg:
            self.log.warning(
                "Use BaseConverter in config, not BaseNbConvertApp. Outdated config:\n%s",
                '\n'.join(
                    'BaseNbConvertApp.{key} = {value!r}'.format(key=key, value=value)
                    for key, value in cfg.BaseNbConvertApp.items()
                )
            )
            cfg.BaseConverter.merge(cfg.BaseNbConvertApp)
            del cfg.BaseNbConvertApp
        super(NbGrader, self)._load_config(cfg, **kwargs)
        # propagate the (migrated) config into the course directory object
        if self.coursedir:
            self.coursedir._load_config(cfg)

    def fail(self, msg, *args):
        """Log the error msg using self.log.error and exit using sys.exit(1)."""
        self.log.error(msg, *args)
        sys.exit(1)

    def build_extra_config(self) -> Config:
        # Hook for subclasses: extra config merged in during initialize().
        return Config()

    def excepthook(self, etype, evalue, tb):
        # Route uncaught exceptions through the bug-report formatter above.
        format_excepthook(etype, evalue, tb)

    @catch_config_error
    def initialize(self, argv: TypingList[str] = None) -> None:
        """Standard traitlets initialization plus nbgrader-specific setup
        (extra config, sys.path, course directory, per-course config file,
        optional logfile)."""
        self.update_config(self.build_extra_config())
        self.init_syspath()
        self.coursedir = CourseDirectory(parent=self)
        super(NbGrader, self).initialize(argv)
        # load config that is in the coursedir directory
        super(JupyterApp, self).load_config_file("nbgrader_config.py", path=self.coursedir.root)
        if self.logfile:
            self.init_logging(logging.FileHandler, [self.logfile], color=False)

    def init_syspath(self) -> None:
        """Add the cwd to the sys.path ($PYTHONPATH)"""
        sys.path.insert(0, os.getcwd())

    def reset(self) -> None:
        """Undo initialize(): close log handlers, reset subapps and drop the
        singleton instance."""
        # stop logging
        self.deinit_logging()
        # recursively reset all subapps
        if self.subapp:
            self.subapp.reset()
        # clear the instance
        self.clear_instance()
        # also drop the cached traitlets logger so a fresh app re-creates it
        traitlets.log._logger = None

    def print_subcommands(self):
        """Print each registered subcommand with its description."""
        for key, (app, desc) in self.subcommands.items():
            print(" {}\n{}\n".format(key, desc))

    def load_config_file(self, **kwargs: Any) -> None:
        """Load the config file.
        By default, errors in loading config are handled, and a warning
        printed on screen. For testing, the suppress_errors option is set
        to False, so errors will make tests fail.
        """
        if self.config_file:
            paths = [os.path.abspath("{}.py".format(self.config_file))]
        else:
            paths = [os.path.join(x, "{}.py".format(self.config_file_name)) for x in self.config_file_paths]
        if not any(os.path.exists(x) for x in paths):
            self.log.warning("No nbgrader_config.py file found (rerun with --debug to see where nbgrader is looking)")
        super(NbGrader, self).load_config_file(**kwargs)

    def start(self) -> None:
        """Create the authenticator/exchange helpers once config is final."""
        super(NbGrader, self).start()
        self.authenticator = Authenticator(parent=self)
        self.exchange = ExchangeFactory(parent=self)
| |
''' Testing for orientations module '''
import numpy as np
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nipy.io.imageformats.orientations import (io_orientation, orientation_affine,
flip_axis, _ornt_to_affine,
apply_orientation, OrientationError)
from nipy.io.imageformats.testing import parametric
# Input affines for io_orientation, paired one-to-one with the expected
# orientation arrays in OUT_ORNTS below.
IN_ARRS = [np.eye(4),
           [[0,0,1,0],
            [0,1,0,0],
            [1,0,0,0],
            [0,0,0,1]],
           [[0,1,0,0],
            [0,0,1,0],
            [1,0,0,0],
            [0,0,0,1]],
           [[3,1,0,0],
            [1,3,0,0],
            [0,0,1,0],
            [0,0,0,1]],
           [[1,3,0,0],
            [3,1,0,0],
            [0,0,1,0],
            [0,0,0,1]],
           ]
# Expected orientations: one (axis, flip) row per output axis.
OUT_ORNTS = [[[0,1],
              [1,1],
              [2,1]],
             [[2,1],
              [1,1],
              [0,1]],
             [[2,1],
              [0,1],
              [1,1]],
             [[0,1],
              [1,1],
              [2,1]],
             [[1,1],
              [0,1],
              [2,1]],
             ]
# Append rotations about z of pi/6 + i*pi/2 (i = 0..3), which exercise
# the nearest-axis decision in io_orientation.
IN_ARRS = IN_ARRS + [[[np.cos(np.pi/6+i*np.pi/2),np.sin(np.pi/6+i*np.pi/2),0,0],
                      [-np.sin(np.pi/6+i*np.pi/2),np.cos(np.pi/6+i*np.pi/2),0,0],
                      [0,0,1,0],
                      [0,0,0,1]] for i in range(4)]
OUT_ORNTS = OUT_ORNTS + [[[0,1],
                          [1,1],
                          [2,1]],
                         [[1,-1],
                          [0,1],
                          [2,1]],
                         [[0,-1],
                          [1,-1],
                          [2,1]],
                         [[1,1],
                          [0,-1],
                          [2,1]]
                         ]
# Normalize everything to ndarrays for the tests below.
IN_ARRS = [np.array(arr) for arr in IN_ARRS]
OUT_ORNTS = [np.array(ornt) for ornt in OUT_ORNTS]
def same_transform(taff, ornt, shape):
    """Return True when affine *taff* and orientation array *ornt* encode
    the same axis permutation/flips for an array of the given 3D *shape*."""
    # Applying transformations implied by `ornt` to a made-up array
    # ``arr`` of shape `shape`, results in ``t_arr``. When the point
    # indices from ``arr`` are transformed by (the inverse of) `taff`,
    # and we index into ``t_arr`` with these transformed points, then we
    # should get the same values as we would from indexing into arr with
    # the untransformed points.
    shape = np.array(shape)
    size = np.prod(shape)
    arr = np.arange(size).reshape(shape)
    # apply ornt transformations
    t_arr = apply_orientation(arr, ornt)
    # get all point indices in arr
    i,j,k = shape
    arr_pts = np.mgrid[:i,:j,:k].reshape((3,-1))
    # inverse of taff takes us from point index in arr to point index in
    # t_arr
    itaff = np.linalg.inv(taff)
    # apply itaff so that points indexed in t_arr should correspond
    o2t_pts = np.dot(itaff[:3,:3], arr_pts) + itaff[:3,3][:,None]
    # the transformed indices must be (near-)integers for the lookup below
    assert np.allclose(np.round(o2t_pts), o2t_pts)
    # fancy index out the t_arr values
    vals = t_arr[list(o2t_pts.astype('i'))]
    return np.all(vals == arr.ravel())
@parametric
def test_apply():
    # most tests are in ``same_transform`` above, via the
    # test_io_orientations.
    a = np.arange(24).reshape((2,3,4))
    # BUG FIX: ``ornt`` was used below without ever being defined in this
    # function (NameError at run time); use the identity orientation so
    # the 4D and error cases are well-defined.
    ornt = [[0, 1], [1, 1], [2, 1]]
    # Test 4D
    t_arr = apply_orientation(a[:,:,:,None], ornt)
    yield assert_equal(t_arr.ndim, 4)
    # Orientation errors
    yield assert_raises(OrientationError,
                        apply_orientation,
                        a[:,:,1], ornt)
    yield assert_raises(OrientationError,
                        apply_orientation,
                        a,
                        [[0,1],[np.nan,np.nan],[2,1]])
@parametric
def test_flip_axis():
    """flip_axis defaults to axis 0 and accepts array-likes."""
    a = np.arange(24).reshape((2,3,4))
    yield assert_array_equal(flip_axis(a), np.flipud(a))
    yield assert_array_equal(flip_axis(a, axis=0), np.flipud(a))
    yield assert_array_equal(flip_axis(a, axis=1), np.fliplr(a))
    # plain nested lists are accepted too
    yield assert_array_equal(flip_axis(a.tolist(), axis=0), np.flipud(a))
    # third axis: transpose, flip the (now first) axis, transpose back
    expected = np.flipud(a.transpose()).transpose()
    yield assert_array_equal(flip_axis(a, axis=2), expected)
@parametric
def test_io_orientation():
    """Check io_orientation against every (affine, expected ornt) pair,
    including single-axis flips of each input affine."""
    shape = (2,3,4)
    for in_arr, out_ornt in zip(IN_ARRS, OUT_ORNTS):
        ornt = io_orientation(in_arr)
        yield assert_array_equal(ornt, out_ornt)
        taff = orientation_affine(ornt, shape)
        yield assert_true(same_transform(taff, ornt, shape))
        for axno in range(3):
            arr = in_arr.copy()
            ex_ornt = out_ornt.copy()
            # flip the input axis in affine
            arr[:,axno] *= -1
            # check that result shows flip
            ex_ornt[axno, 1] *= -1
            ornt = io_orientation(arr)
            yield assert_array_equal(ornt, ex_ornt)
            taff = orientation_affine(ornt, shape)
            yield assert_true(same_transform(taff, ornt, shape))
def test_drop_coord():
    """An over-determined (5x4) affine should reorder axes and drop the
    constant t row."""
    # given a 5x4 affine from slicing an fmri,
    # the orientations code should easily reorder and drop the t
    # axis
    # this affine has output coordinates '-y','z','x' and is at t=16
    sliced_fmri_affine = np.array([[0,-1,0,3],
                                   [0,0,2,5],
                                   [3,0,0,4],
                                   [0,0,0,16],
                                   [0,0,0,1]])
    ornt = io_orientation(sliced_fmri_affine)
    affine_that_drops_t_reorders_and_flips = _ornt_to_affine(ornt)
    final_affine = np.dot(affine_that_drops_t_reorders_and_flips,
                          sliced_fmri_affine)
    # the output will be diagonal
    # with the 'y' row having been flipped and the 't' row dropped
    assert_array_equal(final_affine,
                       np.array([[3,0,0,4],
                                 [0,1,0,-3],
                                 [0,0,2,5],
                                 [0,0,0,1]]))
@parametric
def test_ornt_to_affine():
    """NaN rows in an orientation drop coordinates; other rows permute and
    optionally flip them."""
    # this orientation indicates that the first output
    # axis of the affine is closest to the vector [0,0,-1],
    # the last is closest to [1,0,0] and
    # that the y coordinate ([0,1,0]) is dropped
    ornt = [[2,-1],
            [np.nan,np.nan],
            [0,1]]
    # the reordering/flipping is represented by an affine that
    # takes the 3rd output coordinate and maps it to the
    # first, takes the 3rd, maps it to first and flips it
    A = np.array([[0,0,-1,0],
                  [1,0,0,0],
                  [0,0,0,1]])
    yield assert_array_equal(A, _ornt_to_affine(ornt))
    # a more complicated example. only the 1st, 3rd and 6th
    # coordinates appear in the output
    ornt = [[3,-1],
            [np.nan,np.nan],
            [0,1],
            [np.nan,np.nan],
            [np.nan,np.nan],
            [1,-1]]
    B = np.array([[0,0,0,-1,0,0,0],
                  [1,0,0,0,0,0,0],
                  [0,-1,0,0,0,0,0],
                  [0,0,0,0,0,0,1]])
    yield assert_array_equal(B, _ornt_to_affine(ornt))
| |
"""JSON Web Signature."""
import argparse
import base64
import sys
import OpenSSL
import six
from josepy import b64, errors, json_util, jwa, jwk, util
class MediaType(object):
    """MediaType field encoder/decoder."""

    PREFIX = 'application/'
    """MIME Media Type and Content Type prefix."""

    @classmethod
    def decode(cls, value):
        """Decoder: expand shorthand values with the standard prefix."""
        # 4.1.10: a value containing '/' is already a full media type
        if '/' in value:
            return value
        if ';' in value:
            raise errors.DeserializationError('Unexpected semi-colon')
        return cls.PREFIX + value

    @classmethod
    def encode(cls, value):
        """Encoder: strip the standard prefix when no parameters follow."""
        # 4.1.10: values with parameters (';') are kept verbatim
        if ';' in value:
            return value
        assert value.startswith(cls.PREFIX)
        return value[len(cls.PREFIX):]
class Header(json_util.JSONObjectWithFields):
    """JOSE Header.

    .. warning:: This class supports **only** Registered Header
        Parameter Names (as defined in section 4.1 of the
        protocol). If you need Public Header Parameter Names (4.2)
        or Private Header Parameter Names (4.3), you must subclass
        and override :meth:`from_json` and :meth:`to_partial_json`
        appropriately.

    .. warning:: This class does not support any extensions through
        the "crit" (Critical) Header Parameter (4.1.11) and as a
        conforming implementation, :meth:`from_json` treats its
        occurrence as an error. Please subclass if you seek for
        a different behaviour.

    :ivar x5tS256: "x5t#S256"
    :ivar str typ: MIME Media Type, inc. :const:`MediaType.PREFIX`.
    :ivar str cty: Content-Type, inc. :const:`MediaType.PREFIX`.
    """
    alg = json_util.Field(
        'alg', decoder=jwa.JWASignature.from_json, omitempty=True)
    jku = json_util.Field('jku', omitempty=True)
    jwk = json_util.Field('jwk', decoder=jwk.JWK.from_json, omitempty=True)
    kid = json_util.Field('kid', omitempty=True)
    x5u = json_util.Field('x5u', omitempty=True)
    x5c = json_util.Field('x5c', omitempty=True, default=())
    x5t = json_util.Field(
        'x5t', decoder=json_util.decode_b64jose, omitempty=True)
    x5tS256 = json_util.Field(
        'x5t#S256', decoder=json_util.decode_b64jose, omitempty=True)
    typ = json_util.Field('typ', encoder=MediaType.encode,
                          decoder=MediaType.decode, omitempty=True)
    cty = json_util.Field('cty', encoder=MediaType.encode,
                          decoder=MediaType.decode, omitempty=True)
    crit = json_util.Field('crit', omitempty=True, default=())

    def not_omitted(self):
        """Fields that would not be omitted in the JSON object."""
        return dict((name, getattr(self, name))
                    for name, field in six.iteritems(self._fields)
                    if not field.omit(getattr(self, name)))

    def __add__(self, other):
        """Merge two headers; only defined for same-typed headers with
        disjoint sets of populated fields."""
        if not isinstance(other, type(self)):
            raise TypeError('Header cannot be added to: {0}'.format(
                type(other)))
        not_omitted_self = self.not_omitted()
        not_omitted_other = other.not_omitted()
        if set(not_omitted_self).intersection(not_omitted_other):
            raise TypeError('Addition of overlapping headers not defined')
        not_omitted_self.update(not_omitted_other)
        return type(self)(**not_omitted_self)  # pylint: disable=star-args

    def find_key(self):
        """Find key based on header.

        .. todo:: Supports only "jwk" header parameter lookup.

        :returns: (Public) key found in the header.
        :rtype: .JWK

        :raises josepy.errors.Error: if key could not be found
        """
        if self.jwk is None:
            raise errors.Error('No key found')
        return self.jwk

    @crit.decoder
    def crit(unused_value):
        # pylint: disable=missing-docstring,no-self-argument,no-self-use
        # Any occurrence of "crit" is rejected outright (see class warning).
        raise errors.DeserializationError(
            '"crit" is not supported, please subclass')

    # x5c does NOT use JOSE Base64 (4.1.6)
    @x5c.encoder  # type: ignore
    def x5c(value):  # pylint: disable=missing-docstring,no-self-argument
        return [base64.b64encode(OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_ASN1, cert.wrapped)) for cert in value]

    @x5c.decoder  # type: ignore
    def x5c(value):  # pylint: disable=missing-docstring,no-self-argument
        try:
            return tuple(util.ComparableX509(OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                base64.b64decode(cert))) for cert in value)
        except OpenSSL.crypto.Error as error:
            raise errors.DeserializationError(error)
class Signature(json_util.JSONObjectWithFields):
    """JWS Signature.

    :ivar combined: Combined Header (protected and unprotected,
        :class:`Header`).
    :ivar unicode protected: JWS protected header (Jose Base-64 decoded).
    :ivar header: JWS Unprotected Header (:class:`Header`).
    :ivar str signature: The signature.
    """
    header_cls = Header

    __slots__ = ('combined',)
    protected = json_util.Field('protected', omitempty=True, default='')
    header = json_util.Field(
        'header', omitempty=True, default=header_cls(),
        decoder=header_cls.from_json)
    signature = json_util.Field(
        'signature', decoder=json_util.decode_b64jose,
        encoder=json_util.encode_b64jose)

    @protected.encoder  # type: ignore
    def protected(value):  # pylint: disable=missing-docstring,no-self-argument
        # wrong type guess (Signature, not bytes) | pylint: disable=no-member
        return json_util.encode_b64jose(value.encode('utf-8'))

    @protected.decoder  # type: ignore
    def protected(value):  # pylint: disable=missing-docstring,no-self-argument
        return json_util.decode_b64jose(value).decode('utf-8')

    def __init__(self, **kwargs):
        # Derive the combined header unless the caller supplied it directly.
        if 'combined' not in kwargs:
            kwargs = self._with_combined(kwargs)
        super(Signature, self).__init__(**kwargs)
        assert self.combined.alg is not None

    @classmethod
    def _with_combined(cls, kwargs):
        """Add the 'combined' view: the unprotected header merged with the
        deserialized protected header (when present)."""
        assert 'combined' not in kwargs
        header = kwargs.get('header', cls._fields['header'].default)
        protected = kwargs.get('protected', cls._fields['protected'].default)
        if protected:
            combined = header + cls.header_cls.json_loads(protected)
        else:
            combined = header
        kwargs['combined'] = combined
        return kwargs

    @classmethod
    def _msg(cls, protected, payload):
        # JWS Signing Input: BASE64URL(protected) '.' BASE64URL(payload)
        return (b64.b64encode(protected.encode('utf-8')) + b'.' +
                b64.b64encode(payload))

    def verify(self, payload, key=None):
        """Verify.

        :param JWK key: Key used for verification.
        """
        # Fall back to the key carried in the header itself (e.g. "jwk").
        key = self.combined.find_key() if key is None else key
        return self.combined.alg.verify(
            key=key.key, sig=self.signature,
            msg=self._msg(self.protected, payload))

    @classmethod
    def sign(cls, payload, key, alg, include_jwk=True,
             protect=frozenset(), **kwargs):
        """Sign.

        :param JWK key: Key for signature.
        """
        assert isinstance(key, alg.kty)
        header_params = kwargs
        header_params['alg'] = alg
        if include_jwk:
            header_params['jwk'] = key.public_key()
        assert set(header_params).issubset(cls.header_cls._fields)
        assert protect.issubset(cls.header_cls._fields)
        # Split the requested fields out of the unprotected header into the
        # protected (integrity-covered) header.
        protected_params = {}
        for header in protect:
            if header in header_params:
                protected_params[header] = header_params.pop(header)
        if protected_params:
            # pylint: disable=star-args
            protected = cls.header_cls(**protected_params).json_dumps()
        else:
            protected = ''
        header = cls.header_cls(**header_params)  # pylint: disable=star-args
        signature = alg.sign(key.key, cls._msg(protected, payload))
        return cls(protected=protected, header=header, signature=signature)

    def fields_to_partial_json(self):
        # Drop an entirely-empty unprotected header from the serialization.
        fields = super(Signature, self).fields_to_partial_json()
        if not fields['header'].not_omitted():
            del fields['header']
        return fields

    @classmethod
    def fields_from_json(cls, jobj):
        fields = super(Signature, cls).fields_from_json(jobj)
        fields_with_combined = cls._with_combined(fields)
        # "alg" must appear in at least one of the two headers.
        if 'alg' not in fields_with_combined['combined'].not_omitted():
            raise errors.DeserializationError('alg not present')
        return fields_with_combined
class JWS(json_util.JSONObjectWithFields):
    """JSON Web Signature.

    :ivar str payload: JWS Payload.
    :ivar str signature: JWS Signatures.
    """
    __slots__ = ('payload', 'signatures')
    # Class used for the entries of ``signatures``; subclasses may
    # override it to customize header handling.
    signature_cls = Signature

    def verify(self, key=None):
        """Verify; True only when every signature verifies."""
        return all(sig.verify(self.payload, key) for sig in self.signatures)

    @classmethod
    def sign(cls, payload, **kwargs):
        """Sign *payload*, producing a JWS with a single signature."""
        return cls(payload=payload, signatures=(
            cls.signature_cls.sign(payload=payload, **kwargs),))

    @property
    def signature(self):
        """Get a singleton signature.

        :rtype: :class:`JWS.signature_cls`
        """
        assert len(self.signatures) == 1
        return self.signatures[0]

    def to_compact(self):
        """Compact serialization.

        :rtype: bytes
        """
        assert len(self.signatures) == 1
        # compact form carries no unprotected header, so 'alg' must not
        # live there ...
        assert 'alg' not in self.signature.header.not_omitted()
        # ... it must be in protected
        return (
            b64.b64encode(self.signature.protected.encode('utf-8')) +
            b'.' +
            b64.b64encode(self.payload) +
            b'.' +
            b64.b64encode(self.signature.signature))

    @classmethod
    def from_compact(cls, compact):
        """Compact deserialization.

        :param bytes compact:
        """
        try:
            protected, payload, signature = compact.split(b'.')
        except ValueError:
            raise errors.DeserializationError(
                'Compact JWS serialization should comprise of exactly'
                ' 3 dot-separated components')
        sig = cls.signature_cls(
            protected=b64.b64decode(protected).decode('utf-8'),
            signature=b64.b64decode(signature))
        return cls(payload=b64.b64decode(payload), signatures=(sig,))

    def to_partial_json(self, flat=True):  # pylint: disable=arguments-differ
        """Serialize to the flat JSON form when there is exactly one
        signature (and *flat* is requested), else to the general form."""
        assert self.signatures
        payload = json_util.encode_b64jose(self.payload)
        if flat and len(self.signatures) == 1:
            ret = self.signatures[0].to_partial_json()
            ret['payload'] = payload
            return ret
        else:
            return {
                'payload': payload,
                'signatures': self.signatures,
            }

    @classmethod
    def from_json(cls, jobj):
        """Deserialize from either the flat or the general JSON form."""
        if 'signature' in jobj and 'signatures' in jobj:
            raise errors.DeserializationError('Flat mixed with non-flat')
        elif 'signature' in jobj:  # flat
            return cls(payload=json_util.decode_b64jose(jobj.pop('payload')),
                       signatures=(cls.signature_cls.from_json(jobj),))
        else:
            return cls(payload=json_util.decode_b64jose(jobj['payload']),
                       signatures=tuple(cls.signature_cls.from_json(sig)
                                        for sig in jobj['signatures']))
class CLI(object):
    """JWS CLI: sign stdin with a key, or verify a JWS read from stdin."""

    @classmethod
    def sign(cls, args):
        """Sign stdin; print the result as compact or pretty JSON."""
        key = args.alg.kty.load(args.key.read())
        args.key.close()
        if args.protect is None:
            args.protect = []
        if args.compact:
            # compact serialization requires 'alg' in the protected header
            args.protect.append('alg')
        sig = JWS.sign(payload=sys.stdin.read().encode(), key=key, alg=args.alg,
                       protect=set(args.protect))
        if args.compact:
            six.print_(sig.to_compact().decode('utf-8'))
        else:  # JSON
            six.print_(sig.json_dumps_pretty())

    @classmethod
    def verify(cls, args):
        """Verify a JWS from stdin; return falsy (0) on success."""
        if args.compact:
            sig = JWS.from_compact(sys.stdin.read().encode())
        else:  # JSON
            try:
                sig = JWS.json_loads(sys.stdin.read())
            except errors.Error as error:
                six.print_(error)
                return -1
        if args.key is not None:
            # --kty is required alongside --key to know how to parse it
            assert args.kty is not None
            key = args.kty.load(args.key.read()).public_key()
            args.key.close()
        else:
            key = None
        # NOTE(review): sig.payload is bytes; on Python 3 writing it to the
        # text-mode sys.stdout would raise — presumably Python 2 era code,
        # confirm before porting.
        sys.stdout.write(sig.payload)
        return not sig.verify(key=key)

    @classmethod
    def _alg_type(cls, arg):
        # argparse type for --alg: parse a JWA signature algorithm name
        return jwa.JWASignature.from_json(arg)

    @classmethod
    def _header_type(cls, arg):
        # argparse type for --protect: must name a known header field
        assert arg in Signature.header_cls._fields
        return arg

    @classmethod
    def _kty_type(cls, arg):
        # argparse type for --kty: map a key-type name to its JWK class
        assert arg in jwk.JWK.TYPES
        return jwk.JWK.TYPES[arg]

    @classmethod
    def run(cls, args=None):
        """Parse arguments and sign/verify."""
        if args is None:
            args = sys.argv[1:]
        parser = argparse.ArgumentParser()
        parser.add_argument('--compact', action='store_true')
        subparsers = parser.add_subparsers()
        parser_sign = subparsers.add_parser('sign')
        parser_sign.set_defaults(func=cls.sign)
        parser_sign.add_argument(
            '-k', '--key', type=argparse.FileType('rb'), required=True)
        parser_sign.add_argument(
            '-a', '--alg', type=cls._alg_type, default=jwa.RS256)
        parser_sign.add_argument(
            '-p', '--protect', action='append', type=cls._header_type)
        parser_verify = subparsers.add_parser('verify')
        parser_verify.set_defaults(func=cls.verify)
        parser_verify.add_argument(
            '-k', '--key', type=argparse.FileType('rb'), required=False)
        parser_verify.add_argument(
            '--kty', type=cls._kty_type, required=False)
        parsed = parser.parse_args(args)
        return parsed.func(parsed)
# Allow running this module directly as the jws command line tool.
if __name__ == '__main__':
    exit(CLI.run())  # pragma: no cover
| |
import getpass
import json
import logging
import logging.config
import logging.handlers
import os
import sys

import pymterm
# Render backends that draw into a desktop GUI window.
GUI_RENDERS = ["cairo", "pygame", "native"]
# Pyglet-based backends, kept in their own list.
PYGLET_RENDERS = ["pyglet"]
# All supported render backends, including the text-mode console renderer.
RENDERS = GUI_RENDERS + ["kivy", "console"] + PYGLET_RENDERS
def get_default_user():
    """Login name of the user running the terminal (the default username)."""
    current_user = getpass.getuser()
    return current_user
class SessionConfig:
def __init__(self, args):
    """Build a session configuration from parsed command-line arguments.

    Loads the JSON config file, validates the connection information,
    configures logging, and initializes the color tables.

    :param args: argparse-style namespace with the pymterm options.
    :raises ValueError: when connection information is missing, the
        dump/load data files are unusable, the config file cannot be
        found, or pipe mode lacks a default shell.
    """
    self.term_name = args.term_name
    self.session_name = args.session
    self.port = args.port if args.port is not None else 22
    self.is_logging = args.log is not None
    self.log_file_path = args.log
    self.hostname = ''
    self.username = ''
    self.default_foreground_color = [0x00, 0x00, 0x00, 0x88]
    self.default_background_color = [0xdd, 0xdd, 0xdd, 0xFF]
    self.default_cursor_color = self.default_foreground_color
    self.color_theme = args.color_theme
    self.debug = args.debug
    self.debug_more = args.debug_more
    self.session_type = args.session_type
    self.config = args.config
    self.password = None
    self.render = None
    self.gui_renders = GUI_RENDERS
    self.renders = RENDERS
    self.font_file = args.font_file
    self.font_name = args.font_name
    self.font_size = args.font_size
    self.dump_data = args.dump_data
    self.load_data = args.load_data
    self.send_envs = args.send_envs
    self.use_ssh_config = args.use_ssh_config
    self.load_config()
    # Fail early when the dump target cannot be created.
    if self.dump_data:
        try:
            f = open(self.dump_data, "w")
            f.close()
        except Exception:  # was a bare except; don't swallow SystemExit etc.
            logging.exception("unable to create dump file")
            raise ValueError('Unable to write dump data to file:{}'.format(self.dump_data))
    if self.load_data and not os.access(self.load_data, os.R_OK):
        raise ValueError('Unable to read dump data from file:{}'.format(self.load_data))
    # Render backend: command line wins, then the config file default.
    if args.render:
        self.render = args.render
    elif 'render' in self.config:
        render = self.config['render']
        if 'default' in render:
            self.render = render['default'] if render['default'] in self.renders else None
    if self.debug_more:
        self.debug = True
    self.color_table = []
    if args.conn_str:
        self.set_conn_str(args.conn_str)
    elif not self.session_name:
        if self.render and self.render == 'console':
            raise ValueError("not enough connect information")
    # validate host and user
    if not self.session_name and (len(self.hostname) == 0 or len(self.username) == 0):
        if self.session_type == 'ssh':
            raise ValueError("not enough connect information")
    if not args.conn_str:
        self.config_session()
    if self.session_type == 'pipe':
        if self.config and 'pipe-config' in self.config and 'default-shell' in self.config['pipe-config']:
            pass
        else:
            raise ValueError('no default shell configured for pipe mode')
    # Logging: console handler always, rotating file handler when --log
    # was given (requires logging.handlers, imported at module top).
    default_formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(name)-15s %(message)s',
                                          datefmt='%Y-%m-%d %H:%M:%S')
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.WARN)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(default_formatter)
    root_logger.addHandler(console_handler)
    if self.is_logging or self.debug:
        root_logger.setLevel(logging.DEBUG if self.debug else logging.INFO)
    if self.debug:
        console_handler.setLevel(logging.DEBUG)
    if self.is_logging:
        file_handler = logging.handlers.TimedRotatingFileHandler(self.log_file_path,
                                                                 when='D',
                                                                 backupCount=1,
                                                                 interval=1)
        file_handler.setFormatter(default_formatter)
        file_handler.setLevel(logging.DEBUG if self.debug else logging.INFO)
        root_logger.addHandler(file_handler)
    # init color table
    self.init_color_table()
    # update global debug mode
    pymterm.debug_log = self.debug
    pymterm.debug_more_log = self.debug_more
# Component values used for the dim (set 0) and bright (set 1) halves of
# the 16-entry ANSI palette below.
COLOR_SET_0_RATIO = 0x44
COLOR_SET_1_RATIO = 0xaa
# ansi color (RGBA); entries 0-7 are the normal set, 8-15 the bright set
COLOR_TABLE = [
    [0, 0, 0, 0xFF],  # BLACK
    [COLOR_SET_0_RATIO, 0, 0, 0xFF],  # RED
    [0, COLOR_SET_0_RATIO, 0, 0xFF],  # GREEN
    [COLOR_SET_0_RATIO, COLOR_SET_0_RATIO, 0, 0xFF],  # BROWN
    [0, 0, COLOR_SET_0_RATIO, 0xFF],  # BLUE
    [COLOR_SET_0_RATIO, 0, COLOR_SET_0_RATIO, 0xFF],  # MAGENTA
    [0, COLOR_SET_0_RATIO, COLOR_SET_0_RATIO, 0xFF],  # CYAN
    [COLOR_SET_0_RATIO, COLOR_SET_0_RATIO, COLOR_SET_0_RATIO, 0xFF],  # LIGHT GRAY
    [COLOR_SET_1_RATIO, COLOR_SET_1_RATIO, COLOR_SET_1_RATIO, 0xFF],  # DARK_GREY
    [0xFF, COLOR_SET_1_RATIO, COLOR_SET_1_RATIO, 0xFF],  # RED
    [COLOR_SET_1_RATIO, 0xFF, COLOR_SET_1_RATIO, 0xFF],  # GREEN
    [0xFF, 0xFF, COLOR_SET_1_RATIO, 0xFF],  # YELLOW
    [COLOR_SET_1_RATIO, COLOR_SET_1_RATIO, 0xFF, 0xFF],  # BLUE
    [0xFF, COLOR_SET_1_RATIO, 0xFF, 0xFF],  # MAGENTA
    [COLOR_SET_1_RATIO, 0xFF, 0xFF, 0xFF],  # CYAN
    [0xFF, 0xFF, 0xFF, 0xFF],  # WHITE
]
def init_color_table(self):
#copy default table
self.default_foreground_color = [0x00,0x00,0x00,0x88]
self.default_background_color = [0xdd,0xdd,0xdd,0xFF]
self.default_cursor_color = self.default_foreground_color
self.color_table = [c[:] for c in SessionConfig.COLOR_TABLE]
for i in range(240):
if i < 216:
r = i / 36
g = (i / 6) % 6
b = i % 6
self.color_table.append([r * 40 + 55 if r > 0 else 0,
g * 40 + 55 if g > 0 else 0,
b * 40 + 55 if b > 0 else 0,
0xFF])
else:
shade = (i - 216) * 10 + 8
self.color_table.append([shade,
shade,
shade,
0xFF])
#load config
if self.color_theme:
from colour.color_manager import get_color_theme
color_theme = get_color_theme(self.color_theme)
if color_theme:
if not color_theme.apply_color(self, self.color_table):
self.default_foreground_color = self.color_table[7]
self.default_background_color = self.color_table[0]
#init render color table
self.render_color_table = self.color_table[:]
def get_color(self, idx):
return self.render_color_table[idx]
def clone(self):
import copy
c = copy.deepcopy(self)
c.init_color_table()
return c
def set_conn_str(self, conn_str):
parts = conn_str.split('@')
if len(parts) >= 2:
self.hostname = '@'.join(parts[1:])
self.username = parts[0]
else:
self.hostname = conn_str
self.username = get_default_user()
def get_conn_str(self):
return ''.join([self.username, '@', self.hostname, ':', str(self.port)])
def load_config(self):
    """Load the JSON config file into ``self.config``.

    On entry ``self.config`` holds the config file path given on the
    command line (or a falsy value); when absent, 'pymterm.json' is
    searched for via :meth:`find_config`.

    :raises ValueError: when no config file can be found.
    """
    config_path, need_find_config = (self.config, False) if self.config else ('pymterm.json', True)
    if need_find_config:
        config_path = self.find_config(config_path)
    if not os.path.exists(config_path):
        raise ValueError('unable to find the config file:{}'.format(config_path))
    with open(config_path) as f:
        self.config = json.load(f)
def find_config(self, p):
    """Locate config file *p*.

    Tries *p* itself, then the per-user config directory, the directory
    of this module, and its parent; returns the first existing path, or
    *p* unchanged when nothing is found.
    """
    if os.path.exists(p):
        return p
    import appdirs
    dirs = [appdirs.user_config_dir('pymterm'),
            os.path.dirname(__file__),
            os.path.join(os.path.dirname(__file__), '..')]
    for d in dirs:
        pp = os.path.join(d, p)
        if os.path.exists(pp):
            return pp
    return p
def config_session(self):
    """Apply settings from the named session entry of the config file.

    No-op when no session name was given.

    :raises ValueError: when the session entry or its connection string
        is missing from the config file.
    """
    if self.session_name:
        if not self.config or not 'sessions' in self.config or not self.session_name in self.config['sessions']:
            raise ValueError("unable to find the session:{}".format(self.session_name))
        #set session config
        session = self.config['sessions'][self.session_name]
        if not 'conn_str' in session:
            raise ValueError("unable to find connection string for the session:{}".format(self.session_name))
        self.set_conn_str(session['conn_str'])
        if 'port' in session:
            self.port = session['port']
        else:
            self.port = 22
        if 'password' in session:
            self.password = session['password']
        else:
            self.password = None
        # named sessions always connect over ssh
        self.session_type = 'ssh'
def get_session_names(self):
if not 'sessions' in self.config:
return []
return [name for name in self.config['sessions']]
def get_font_info(self):
    """Resolve which font to use.

    Precedence: command-line overrides beat the 'font' section of the
    config file, which beats the built-in defaults.

    :return: tuple ``(font_file or None, font_name, font_size)``.
    """
    font_size = 17
    font_file = None
    font_name = 'Monospace'
    font_dir = None

    def norm_font_file(f, f_dir = None):
        # Expand ~ and $VARS; a relative path is also tried under f_dir.
        # Returns None when no existing file is found.
        if f is None:
            return None
        f = os.path.expandvars(os.path.expanduser(f))
        f_dir = os.path.expandvars(os.path.expanduser(f_dir)) if f_dir else None
        if os.path.isfile(f):
            return f
        if not os.path.isabs(f):
            f = os.path.join(f_dir if f_dir else '.', f)
        return f if os.path.isfile(f) else None

    config = self.config
    if config and 'font' in config:
        font_config = config['font']
        if 'name' in font_config:
            font_name = font_config['name']
        if 'font_file' in font_config:
            font_file = font_config['font_file']
        if 'size' in font_config:
            font_size = font_config['size']
        if 'font_dir' in font_config:
            font_dir = font_config['font_dir']
    # command-line values win over the config file
    if self.font_size:
        font_size = self.font_size
    if self.font_name:
        font_name = self.font_name
    if self.font_file:
        font_file = self.font_file
    font_file = norm_font_file(font_file, font_dir)
    logging.info('font info:file=[{}], name=[{}], size={}'.format(font_file, font_name, font_size))
    return (font_file, font_name, font_size)
def load_send_envs(self, send_envs):
keys, _vars = [], {}
if not send_envs:
return (keys, _vars)
for env in send_envs:
if '=' in env:
parts=env.split('=')
_vars[parts[0]] = '='.join(parts[1:])
else:
keys.append(env)
return (keys, _vars)
def read_ssh_config_file(self, config_file, hostname):
files = []
if config_file == '__pymterm_use_sys_default_config_file__':
files.extend(['~/.ssh/ssh_config', '/etc/ssh/ssh_config'])
else:
files.append(config_file)
for file_path in files:
fp = os.path.expandvars(os.path.expanduser(file_path))
if os.access(fp, os.R_OK):
import paramiko
try:
ssh_config = paramiko.SSHConfig()
with open(fp) as f:
ssh_config.parse(f)
return ssh_config.lookup(hostname)
except:
logging.exception('read ssh_config file failed:{}'.format(fp))
return None
def get_ssh_config(self, session, hostname):
    """Look up ssh_config data for *hostname*.

    Precedence: the command-line use_ssh_config path, then the session
    entry's settings, then the global config file settings; within each
    level an explicit file path beats the system default locations.

    :return: the lookup dict from :meth:`read_ssh_config_file`, or None.
    """
    ssh_config = None
    if self.use_ssh_config:
        ssh_config = self.read_ssh_config_file(self.use_ssh_config, hostname)
    if not ssh_config and session:
        if 'use_ssh_config' in session:
            ssh_config = self.read_ssh_config_file(session['use_ssh_config'], hostname)
        if not ssh_config and 'use_system_ssh_config' in session and session['use_system_ssh_config']:
            ssh_config = self.read_ssh_config_file('__pymterm_use_sys_default_config_file__', hostname)
    if not ssh_config:
        if 'use_ssh_config' in self.config:
            ssh_config = self.read_ssh_config_file(self.config['use_ssh_config'], hostname)
        if not ssh_config and 'use_system_ssh_config' in self.config and self.config['use_system_ssh_config']:
            ssh_config = self.read_ssh_config_file('__pymterm_use_sys_default_config_file__', hostname)
    return ssh_config
def get_envs(self, keys):
import fnmatch
envs = {}
for key in os.environ:
for key2 in keys:
if fnmatch.fnmatch(key, key2):
envs[key] = os.environ[key]
return envs
def get_conn_info(self):
    """Collect environment variables to send and the ssh proxy command.

    :return: tuple ``(envs dict, proxy_command or None)``; empty when no
        hostname is configured.
    """
    if not self.hostname or len(self.hostname) == 0:
        return ({}, None) #envs, proxy command
    envs = {}
    proxy_command = None
    session = None
    if (self.session_name
            and self.config
            and 'sessions' in self.config
            and self.session_name in self.config['sessions']):
        session = self.config['sessions'][self.session_name]
    ssh_config = self.get_ssh_config(session, self.hostname)
    if ssh_config:
        if 'proxycommand' in ssh_config:
            proxy_command = ssh_config['proxycommand']
        if 'sendenv' in ssh_config:
            # ssh_config SendEnv is a space-separated list of name patterns
            keys, _vars = self.load_send_envs(ssh_config['sendenv'].split(' '))
            envs.update(self.get_envs(keys))
            envs.update(_vars)
    # later sources override earlier ones: global config, then the
    # session entry, then the command line
    #global envs
    if 'send_envs' in self.config:
        keys, _vars = self.load_send_envs(self.config['send_envs'])
        envs.update(self.get_envs(keys))
        envs.update(_vars)
    #session envs
    if session and 'send_envs' in session:
        keys, _vars = self.load_send_envs(session['send_envs'])
        envs.update(self.get_envs(keys))
        envs.update(_vars)
    #cmd envs
    if self.send_envs:
        keys, _vars = self.load_send_envs(self.send_envs)
        envs.update(self.get_envs(keys))
        envs.update(_vars)
    return (envs, proxy_command)
def update_color_for_render(self, gen_color_func):
for i in range(len(self.render_color_table)):
self.render_color_table[i] = gen_color_func(self.render_color_table[i])
self.default_foreground_color = gen_color_func(self.default_foreground_color)
self.default_background_color = gen_color_func(self.default_background_color)
self.default_cursor_color = gen_color_func(self.default_cursor_color)
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import sys
import mock
import netaddr
import webob.exc
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.db import api as db_api
from neutron.db import model_base
from neutron import manager
from neutron.tests.unit.ml2.drivers.cisco.apic import (
test_cisco_apic_common as mocked)
from neutron.tests.unit.ml2 import test_ml2_plugin as test_plugin
from opflexagent import constants as ocst
from oslo.config import cfg
sys.modules["apicapi"] = mock.Mock()
from gbpservice.neutron.services.grouppolicy import (
group_policy_context as p_context)
from gbpservice.neutron.services.grouppolicy import config
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
apic_mapping as amap)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_rmd)
# Resource-kind name constants used throughout these tests.
APIC_L2_POLICY = 'l2_policy'
APIC_L3_POLICY = 'l3_policy'
APIC_POLICY_RULE_SET = 'policy_rule_set'
APIC_POLICY_TARGET_GROUP = 'policy_target_group'
APIC_POLICY_RULE = 'policy_rule'
# Router id used for the external-connectivity config entries.
APIC_EXTERNAL_RID = '1.0.0.1'
# Fake OpFlex agent configuration reported as alive on every test host.
AGENT_TYPE = ocst.AGENT_TYPE_OPFLEX_OVS
AGENT_CONF = {'alive': True, 'binary': 'somebinary',
              'topic': 'sometopic', 'agent_type': AGENT_TYPE,
              'configurations': {'opflex_networks': None,
                                 'bridge_mappings': {'physnet1': 'br-eth1'}}}
def echo(context, string, prefix=''):
    """Identity name mapper: hand *string* back with an optional *prefix*.

    Mirrors the signature of the driver's name-mapper callables; the
    *context* argument is accepted but unused.
    """
    return prefix + string
class MockCallRecorder(mock.Mock):
    """Mock that records calls so tests can ask whether a given argument
    combination was ever used.

    Fix: ``recorded_call_set`` used to be a class attribute, so calls
    recorded on one instance leaked into every other instance; it is now
    created per instance.
    """

    def __init__(self, *args, **kwargs):
        super(MockCallRecorder, self).__init__(*args, **kwargs)
        # per-instance record of normalized call entries
        self.recorded_call_set = set()

    def __call__(self, *args, **kwargs):
        self.recorded_call_set.add(self.generate_entry(*args, **kwargs))
        return mock.Mock()

    def call_happened_with(self, *args, **kwargs):
        """True when the mock was called with exactly these arguments."""
        return self.generate_entry(*args, **kwargs) in self.recorded_call_set

    def generate_entry(self, *args, **kwargs):
        # hashable, order-insensitive (for kwargs) normal form of a call
        return args, tuple((x, kwargs[x]) for x in sorted(kwargs.keys()))
class ApicMappingTestCase(
        test_rmd.ResourceMappingTestCase,
        mocked.ControllerMixin, mocked.ConfigMixin):
    """Base test case wiring the APIC GBP mapping driver to mocks.

    RPC, the APIC manager, the name mapper and nova lookups are all
    stubbed so driver behavior can be asserted via recorded mock calls.
    """

    def setUp(self):
        # disable security groups so they don't interfere with the tests
        cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
        config.cfg.CONF.set_override('enable_security_group', False,
                                     group='SECURITYGROUP')
        n_rpc.create_connection = mock.Mock()
        amap.ApicMappingDriver.get_apic_manager = mock.Mock()
        self.set_up_mocks()
        ml2_opts = {
            'mechanism_drivers': ['apic_gbp'],
            'type_drivers': ['opflex'],
            'tenant_network_types': ['opflex']
        }
        mock.patch('gbpservice.neutron.services.grouppolicy.drivers.cisco.'
                   'apic.apic_mapping.ApicMappingDriver._setup_rpc').start()
        host_agents = mock.patch('neutron.plugins.ml2.driver_context.'
                                 'PortContext.host_agents').start()
        host_agents.return_value = [AGENT_CONF]
        nova_client = mock.patch(
            'gbpservice.neutron.services.grouppolicy.drivers.cisco.'
            'apic.nova_client.NovaClient.get_server').start()
        vm = mock.Mock()
        vm.name = 'someid'
        nova_client.return_value = vm
        super(ApicMappingTestCase, self).setUp(
            policy_drivers=['implicit_policy', 'apic'],
            core_plugin=test_plugin.PLUGIN_NAME, ml2_options=ml2_opts)
        engine = db_api.get_engine()
        model_base.BASEV2.metadata.create_all(engine)
        plugin = manager.NeutronManager.get_plugin()
        plugin.remove_networks_from_down_agents = mock.Mock()
        plugin.is_agent_down = mock.Mock(return_value=False)
        self.driver = manager.NeutronManager.get_service_plugins()[
            'GROUP_POLICY'].policy_driver_manager.policy_drivers['apic'].obj
        amap.ApicMappingDriver.get_base_synchronizer = mock.Mock()
        # identity name mapping (echo) keeps APIC-name assertions trivial
        self.driver.name_mapper = mock.Mock()
        self.driver.name_mapper.tenant = echo
        self.driver.name_mapper.l2_policy = echo
        self.driver.name_mapper.l3_policy = echo
        self.driver.name_mapper.policy_rule_set = echo
        self.driver.name_mapper.policy_rule = echo
        self.driver.name_mapper.app_profile.return_value = mocked.APIC_AP
        self.driver.name_mapper.policy_target_group = echo
        self.driver.name_mapper.external_policy = echo
        self.driver.name_mapper.external_segment = echo
        self.driver.apic_manager = mock.Mock(name_mapper=mock.Mock(),
                                             ext_net_dict={})
        self.driver.apic_manager.apic.transaction = self.fake_transaction
        self.driver.notifier = mock.Mock()
        self.driver.apic_manager.ext_net_dict = {}
        amap.apic_manager.TENANT_COMMON = 'common'
        self.common_tenant = amap.apic_manager.TENANT_COMMON

    def _build_external_dict(self, name, cidr_exposed):
        # One entry in the shape of the driver's external-network config.
        return {name: {
            'switch': mocked.APIC_EXT_SWITCH,
            'port': mocked.APIC_EXT_MODULE + '/' + mocked.APIC_EXT_PORT,
            'encap': mocked.APIC_EXT_ENCAP,
            'router_id': APIC_EXTERNAL_RID,
            'cidr_exposed': cidr_exposed,
            'gateway_ip': str(netaddr.IPNetwork(cidr_exposed)[1])}}

    def _mock_external_dict(self, data):
        # data: iterable of (name, cidr_exposed) pairs
        self.driver.apic_manager.ext_net_dict = {}
        for x in data:
            self.driver.apic_manager.ext_net_dict.update(
                self._build_external_dict(x[0], x[1]))

    def _check_call_list(self, expected, observed):
        # Order-insensitive exact comparison of mock call lists.
        # NOTE: consumes (mutates) *observed*.
        for call in expected:
            self.assertTrue(call in observed,
                            msg='Call not found, expected:\n%s\nobserved:'
                                '\n%s' % (str(call), str(observed)))
            observed.remove(call)
        self.assertFalse(
            len(observed),
            msg='There are more calls than expected: %s' % str(observed))

    def _create_simple_policy_rule(self, direction='bi', protocol='tcp',
                                   port_range=80, shared=False,
                                   action_type='allow', action_value=None):
        # Convenience: classifier + action + rule in a single call.
        cls = self.create_policy_classifier(
            direction=direction, protocol=protocol,
            port_range=port_range, shared=shared)['policy_classifier']
        action = self.create_policy_action(
            action_type=action_type, shared=shared,
            action_value=action_value)['policy_action']
        return self.create_policy_rule(
            policy_classifier_id=cls['id'], policy_actions=[action['id']],
            shared=shared)['policy_rule']

    def _bind_port_to_host(self, port_id, host):
        # Register a (fake) agent on *host* and bind the port there.
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.get_admin_context()
        agent = {'host': host}
        agent.update(AGENT_CONF)
        plugin.create_or_update_agent(ctx, agent)
        data = {'port': {'binding:host_id': host, 'device_owner': 'compute:',
                         'device_id': 'someid'}}
        # Create EP with bound port
        req = self.new_update_request('ports', data, port_id,
                                      self.fmt)
        return self.deserialize(self.fmt, req.get_response(self.api))
class TestPolicyTarget(ApicMappingTestCase):
    """Policy-target lifecycle tests against the mocked APIC driver."""

    def test_policy_target_port_deleted_on_apic(self):
        # Deleting a PT with a bound port must notify the agent.
        ptg = self.create_policy_target_group()['policy_target_group']
        subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
        with self.port(subnet=subnet) as port:
            self._bind_port_to_host(port['port']['id'], 'h1')
            pt = self.create_policy_target(
                policy_target_group_id=ptg['id'], port_id=port['port']['id'])
            self.new_delete_request(
                'policy_targets', pt['policy_target']['id'],
                self.fmt).get_response(self.ext_api)
            self.assertTrue(self.driver.notifier.port_update.called)

    def test_policy_target_delete_no_port(self):
        # Deleting a PT whose port is already gone yields a 404.
        ptg = self.create_policy_target_group()['policy_target_group']
        subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
        with self.port(subnet=subnet) as port:
            self._bind_port_to_host(port['port']['id'], 'h1')
            pt = self.create_policy_target(
                policy_target_group_id=ptg['id'], port_id=port['port']['id'])
            res = self.new_delete_request('ports', port['port']['id'],
                                          self.fmt).get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
            self.delete_policy_target(pt['policy_target']['id'],
                                      expected_res_status=404)

    def test_delete_policy_target_notification_no_apic_network(self):
        # Implicit ports are removed with the PT (no notification);
        # explicit ports survive and trigger an agent notification.
        ptg = self.create_policy_target_group(
            name="ptg1")['policy_target_group']
        pt1 = self.create_policy_target(
            policy_target_group_id=ptg['id'])['policy_target']
        self._bind_port_to_host(pt1['port_id'], 'h1')
        # Implicit port will be deleted with the PT
        self.delete_policy_target(pt1['id'], expected_res_status=204)
        # No notification needed
        self.assertFalse(self.driver.notifier.port_update.called)
        self.driver.notifier.port_update.reset_mock()
        subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
        with self.port(subnet=subnet) as port:
            # Create EP with bound port
            port = self._bind_port_to_host(port['port']['id'], 'h1')
            pt1 = self.create_policy_target(
                policy_target_group_id=ptg['id'], port_id=port['port']['id'])
            # Explicit port won't be deleted with PT
            self.delete_policy_target(pt1['policy_target']['id'],
                                      expected_res_status=204)
            # Issue notification for the agent
            self.assertTrue(self.driver.notifier.port_update.called)

    def test_get_gbp_details(self):
        # The RPC answer maps device -> port/EPG and the mocked VM name.
        ptg = self.create_policy_target_group(
            name="ptg1")['policy_target_group']
        pt1 = self.create_policy_target(
            policy_target_group_id=ptg['id'])['policy_target']
        self._bind_port_to_host(pt1['port_id'], 'h1')
        mapping = self.driver.get_gbp_details(context.get_admin_context(),
                                              device='tap%s' % pt1['port_id'], host='h1')
        self.assertEqual(pt1['port_id'], mapping['port_id'])
        self.assertEqual(ptg['id'], mapping['endpoint_group_name'])
        self.assertEqual('someid', mapping['vm-name'])

    def test_get_gbp_details_shadow(self):
        # Ports outside any PTG land in the l2-policy shadow EPG.
        l2p = self.create_l2_policy()['l2_policy']
        network = self._get_object('networks', l2p['network_id'], self.api)
        with self.subnet(network=network) as sub:
            with self.port(subnet=sub) as port:
                self._bind_port_to_host(port['port']['id'], 'h1')
                mapping = self.driver.get_gbp_details(
                    context.get_admin_context(),
                    device='tap%s' % port['port']['id'], host='h1')
                self.assertEqual(port['port']['id'], mapping['port_id'])
                self.assertEqual(amap.SHADOW_PREFIX + l2p['id'],
                                 mapping['endpoint_group_name'])

    def test_explicit_port(self):
        # A PT created on a pre-existing bound port notifies the agent.
        with self.network() as net:
            with self.subnet(network=net) as sub:
                with self.port(subnet=sub) as port:
                    self._bind_port_to_host(port['port']['id'], 'h1')
                    l2p = self.create_l2_policy(
                        network_id=net['network']['id'])['l2_policy']
                    ptg = self.create_policy_target_group(
                        l2_policy_id=l2p['id'])['policy_target_group']
                    self.create_policy_target(
                        port_id=port['port']['id'],
                        policy_target_group_id=ptg['id'])
                    self.assertTrue(self.driver.notifier.port_update.called)

    def test_port_notified_on_changed_ptg(self):
        # Moving a PT between PTGs on the same l2 policy notifies the agent.
        ptg = self.create_policy_target_group()['policy_target_group']
        ptg2 = self.create_policy_target_group(
            l2_policy_id=ptg['l2_policy_id'])['policy_target_group']
        pt = self.create_policy_target(
            policy_target_group_id=ptg['id'])['policy_target']
        self._bind_port_to_host(pt['port_id'], 'h1')
        self.driver.notifier.port_update.reset_mock()
        self.update_policy_target(pt['id'], policy_target_group_id=ptg2['id'])
        self.assertTrue(self.driver.notifier.port_update.called)

    def test_update_ptg_failed(self):
        # Moving a PT across different l2 policies must be rejected.
        ptg = self.create_policy_target_group()['policy_target_group']
        ptg2 = self.create_policy_target_group()['policy_target_group']
        pt = self.create_policy_target(
            policy_target_group_id=ptg['id'])['policy_target']
        res = self.update_policy_target(
            pt['id'], policy_target_group_id=ptg2['id'],
            expected_res_status=400)
        self.assertEqual('InvalidPortForPTG', res['NeutronError']['type'])
class TestPolicyTargetGroup(ApicMappingTestCase):
def _test_policy_target_group_created_on_apic(self, shared=False):
ptg = self.create_policy_target_group(
name="ptg1", shared=shared)['policy_target_group']
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr = self.driver.apic_manager
expected_calls = [
mock.call(tenant, ptg['id'], bd_name=ptg['l2_policy_id'],
bd_owner=tenant),
mock.call(tenant, amap.SHADOW_PREFIX + ptg['l2_policy_id'],
bd_name=ptg['l2_policy_id'], bd_owner=tenant,
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.ensure_epg_created.call_args_list)
def test_policy_target_group_created_on_apic(self):
self._test_policy_target_group_created_on_apic()
def test_policy_target_group_created_on_apic_shared(self):
self._test_policy_target_group_created_on_apic(shared=True)
def _test_ptg_policy_rule_set_created(self, provider=True, shared=False):
cntr = self.create_policy_rule_set(name='c',
shared=shared)['policy_rule_set']
l2p = self.create_l2_policy()['l2_policy']
mgr = self.driver.apic_manager
mgr.set_contract_for_epg.reset_mock()
if provider:
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'],
provided_policy_rule_sets={cntr['id']: 'scope'})[
'policy_target_group']
else:
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'],
consumed_policy_rule_sets={cntr['id']: 'scope'})[
'policy_target_group']
# Verify that the apic call is issued
ct_owner = self.common_tenant if shared else cntr['tenant_id']
expected_calls = [
mock.call(
ptg['tenant_id'], ptg['id'], cntr['id'],
transaction=mock.ANY, contract_owner=ct_owner,
provider=provider),
mock.call(
ptg['tenant_id'], ptg['id'],
amap.SERVICE_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY, contract_owner=ptg['tenant_id'],
provider=False),
mock.call(
ptg['tenant_id'], ptg['id'],
amap.IMPLICIT_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY, contract_owner=ptg['tenant_id'],
provider=True),
mock.call(
ptg['tenant_id'], ptg['id'],
amap.IMPLICIT_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY, contract_owner=ptg['tenant_id'],
provider=False)]
self._check_call_list(expected_calls,
mgr.set_contract_for_epg.call_args_list)
def _test_ptg_policy_rule_set_updated(self, provider=True, shared=False):
p_or_c = {True: 'provided_policy_rule_sets',
False: 'consumed_policy_rule_sets'}
cntr = self.create_policy_rule_set(
name='c1', shared=shared)['policy_rule_set']
new_cntr = self.create_policy_rule_set(
name='c2', shared=shared)['policy_rule_set']
if provider:
ptg = self.create_policy_target_group(
provided_policy_rule_sets={cntr['id']: 'scope'})
else:
ptg = self.create_policy_target_group(
consumed_policy_rule_sets={cntr['id']: 'scope'})
data = {'policy_target_group': {p_or_c[provider]:
{new_cntr['id']: 'scope'}}}
req = self.new_update_request('policy_target_groups', data,
ptg['policy_target_group']['id'],
self.fmt)
ptg = self.deserialize(self.fmt, req.get_response(self.ext_api))
ptg = ptg['policy_target_group']
mgr = self.driver.apic_manager
ct_owner = self.common_tenant if shared else cntr['tenant_id']
mgr.set_contract_for_epg.assert_called_with(
ptg['tenant_id'], ptg['id'], new_cntr['id'],
contract_owner=ct_owner, transaction=mock.ANY,
provider=provider)
mgr.unset_contract_for_epg.assert_called_with(
ptg['tenant_id'], ptg['id'], cntr['id'],
contract_owner=ct_owner,
transaction=mock.ANY, provider=provider)
def test_ptg_policy_rule_set_provider_created(self):
self._test_ptg_policy_rule_set_created()
def test_ptg_policy_rule_set_provider_updated(self):
self._test_ptg_policy_rule_set_updated()
def test_ptg_policy_rule_set_consumer_created(self):
self._test_ptg_policy_rule_set_created(False)
def test_ptg_policy_rule_set_consumer_updated(self):
self._test_ptg_policy_rule_set_updated(False)
def test_ptg_policy_rule_set_provider_created_shared(self):
self._test_ptg_policy_rule_set_created(shared=True)
def test_ptg_policy_rule_set_provider_updated_shared(self):
self._test_ptg_policy_rule_set_updated(shared=True)
def test_ptg_policy_rule_set_consumer_created_shared(self):
self._test_ptg_policy_rule_set_created(False, shared=True)
def test_ptg_policy_rule_set_consumer_updated_shared(self):
self._test_ptg_policy_rule_set_updated(False, shared=True)
def _test_policy_target_group_deleted_on_apic(self, shared=False):
ptg = self.create_policy_target_group(
name="ptg1", shared=shared)['policy_target_group']
req = self.new_delete_request('policy_target_groups',
ptg['id'], self.fmt)
req.get_response(self.ext_api)
mgr = self.driver.apic_manager
tenant = self.common_tenant if shared else ptg['tenant_id']
expected_calls = [
mock.call(tenant, ptg['id']),
mock.call(tenant, amap.SHADOW_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY)]
self._check_call_list(expected_calls,
mgr.delete_epg_for_network.call_args_list)
def test_policy_target_group_deleted_on_apic(self):
self._test_policy_target_group_deleted_on_apic()
def test_policy_target_group_deleted_on_apic_shared(self):
self._test_policy_target_group_deleted_on_apic(shared=True)
def _test_policy_target_group_subnet_created_on_apic(self, shared=False):
ptg = self._create_explicit_subnet_ptg('10.0.0.0/24', shared=shared)
mgr = self.driver.apic_manager
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr.ensure_subnet_created_on_apic.assert_called_once_with(
tenant, ptg['l2_policy_id'], '10.0.0.1/24',
transaction=mock.ANY)
    def test_policy_target_group_subnet_created_on_apic(self):
        """Delegate: explicit PTG subnet pushed to APIC, unshared."""
        self._test_policy_target_group_subnet_created_on_apic()
    def test_policy_target_group_subnet_created_on_apic_shared(self):
        """Delegate: explicit PTG subnet pushed to APIC, common tenant."""
        self._test_policy_target_group_subnet_created_on_apic(shared=True)
    def _test_policy_target_group_subnet_added(self, shared=False):
        """Adding a second subnet to a PTG must create it on APIC too."""
        ptg = self._create_explicit_subnet_ptg('10.0.0.0/24', shared=shared)
        l2p = self._get_object('l2_policies', ptg['l2_policy_id'],
                               self.ext_api)
        network = self._get_object('networks', l2p['l2_policy']['network_id'],
                                   self.api)
        # Create a second subnet on the same network and append it to the
        # PTG's subnet list via an update request.
        with self.subnet(network=network, cidr='10.0.1.0/24') as subnet:
            data = {'policy_target_group':
                    {'subnets': ptg['subnets'] + [subnet['subnet']['id']]}}
            mgr = self.driver.apic_manager
            self.new_update_request('policy_target_groups', data, ptg['id'],
                                    self.fmt).get_response(self.ext_api)
            tenant = self.common_tenant if shared else ptg['tenant_id']
            # The most recent APIC subnet call must be for the new CIDR.
            mgr.ensure_subnet_created_on_apic.assert_called_with(
                tenant, ptg['l2_policy_id'], '10.0.1.1/24',
                transaction=mock.ANY)
    def test_policy_target_group_subnet_added(self):
        """Delegate: second PTG subnet created on APIC, unshared."""
        self._test_policy_target_group_subnet_added()
    def test_policy_target_group_subnet_added_shared(self):
        """Delegate: second PTG subnet created on APIC, common tenant."""
        self._test_policy_target_group_subnet_added(shared=True)
    def _test_process_subnet_update(self, shared=False):
        """A gateway-IP change must be mirrored as delete+create on APIC."""
        ptg = self._create_explicit_subnet_ptg('10.0.0.0/24', shared=shared)
        subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
        # Build a modified copy with a different gateway IP.
        subnet2 = copy.deepcopy(subnet)
        subnet2['subnet']['gateway_ip'] = '10.0.0.254'
        mgr = self.driver.apic_manager
        # Discard calls recorded during PTG setup.
        mgr.reset_mock()
        self.driver.process_subnet_changed(context.get_admin_context(),
                                           subnet['subnet'], subnet2['subnet'])
        tenant = self.common_tenant if shared else ptg['tenant_id']
        # New gateway created ...
        mgr.ensure_subnet_created_on_apic.assert_called_once_with(
            tenant, ptg['l2_policy_id'], '10.0.0.254/24',
            transaction=mock.ANY)
        # ... and the old one removed.
        mgr.ensure_subnet_deleted_on_apic.assert_called_with(
            tenant, ptg['l2_policy_id'], '10.0.0.1/24',
            transaction=mock.ANY)
    def test_process_subnet_update(self):
        """Delegate: subnet gateway change mirrored on APIC, unshared."""
        self._test_process_subnet_update()
    def test_process_subnet_update_shared(self):
        """Delegate: subnet gateway change mirrored on APIC, common tenant."""
        self._test_process_subnet_update(shared=True)
def test_multiple_ptg_per_l2p(self):
l2p = self.create_l2_policy()['l2_policy']
# Create first PTG
ptg1 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
ptg2 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
self.assertEqual(ptg1['subnets'], ptg2['subnets'])
    def test_force_add_subnet(self):
        """Force-adding an implicit subnet grows it for all PTGs on the L2P."""
        l2p = self.create_l2_policy()['l2_policy']
        # Create first PTG
        ptg1 = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        ptg2 = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        ctx = p_context.PolicyTargetGroupContext(
            self.driver.gbp_plugin, context.get_admin_context(), ptg2)
        # Emulate force add
        self.driver._use_implicit_subnet(ctx, force_add=True)
        # There is now a new subnet, and it's added to both PTGs
        self.assertEqual(2, len(ctx.current['subnets']))
        ptg1 = self.show_policy_target_group(ptg1['id'])['policy_target_group']
        self.assertEqual(2, len(ptg1['subnets']))
        ptg2 = self.show_policy_target_group(ptg2['id'])['policy_target_group']
        self.assertEqual(2, len(ptg2['subnets']))
        # Both PTGs see the same two, distinct, subnets.
        self.assertEqual(set(ptg1['subnets']), set(ptg2['subnets']))
        self.assertNotEqual(ptg2['subnets'][0], ptg2['subnets'][1])
def test_subnets_unique_per_l3p(self):
l3p = self.create_l3_policy(shared=True, tenant_id='admin',
is_admin_context=True)['l3_policy']
l2p1 = self.create_l2_policy(
tenant_id='hr', l3_policy_id=l3p['id'])['l2_policy']
l2p2 = self.create_l2_policy(
tenant_id='eng', l3_policy_id=l3p['id'])['l2_policy']
ptg1 = self.create_policy_target_group(
tenant_id='hr', l2_policy_id=l2p1['id'])['policy_target_group']
ptg2 = self.create_policy_target_group(
tenant_id='eng', l2_policy_id=l2p2['id'])['policy_target_group']
sub_ptg_1 = set(self._get_object('subnets',
x, self.api)['subnet']['cidr']
for x in ptg1['subnets'])
sub_ptg_2 = set(self._get_object('subnets',
x, self.api)['subnet']['cidr']
for x in ptg2['subnets'])
self.assertNotEqual(sub_ptg_1, sub_ptg_2)
self.assertFalse(sub_ptg_1 & sub_ptg_2)
def _create_explicit_subnet_ptg(self, cidr, shared=False):
l2p = self.create_l2_policy(name="l2p", shared=shared)
l2p_id = l2p['l2_policy']['id']
network_id = l2p['l2_policy']['network_id']
network = self._get_object('networks', network_id, self.api)
with self.subnet(network=network, cidr=cidr):
# The subnet creation in the proper network causes the subnet ID
# to be added to the PTG
return self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_id,
shared=shared)['policy_target_group']
class TestL2Policy(ApicMappingTestCase):
    """Tests mapping L2 policy lifecycle onto APIC BDs, EPGs and contracts."""
    def _test_l2_policy_created_on_apic(self, shared=False):
        """L2P create must build a BD and a shadow EPG on APIC."""
        l2p = self.create_l2_policy(name="l2p", shared=shared)['l2_policy']
        tenant = self.common_tenant if shared else l2p['tenant_id']
        mgr = self.driver.apic_manager
        mgr.ensure_bd_created_on_apic.assert_called_once_with(
            tenant, l2p['id'], ctx_owner=tenant, ctx_name=l2p['l3_policy_id'],
            transaction=mock.ANY)
        mgr.ensure_epg_created.assert_called_once_with(
            tenant, amap.SHADOW_PREFIX + l2p['id'], bd_owner=tenant,
            bd_name=l2p['id'], transaction=mock.ANY)
    def test_l2_policy_created_on_apic(self):
        """Delegate: L2P create mapped to APIC, unshared."""
        self._test_l2_policy_created_on_apic()
    def test_l2_policy_created_on_apic_shared(self):
        """Delegate: L2P create mapped to APIC, common tenant."""
        self._test_l2_policy_created_on_apic(shared=True)
    def _test_l2_policy_deleted_on_apic(self, shared=False):
        """L2P delete must remove BD, shadow EPG and implicit contracts."""
        l2p = self.create_l2_policy(name="l2p", shared=shared)['l2_policy']
        req = self.new_delete_request('l2_policies', l2p['id'], self.fmt)
        req.get_response(self.ext_api)
        tenant = self.common_tenant if shared else l2p['tenant_id']
        mgr = self.driver.apic_manager
        mgr.delete_bd_on_apic.assert_called_once_with(
            tenant, l2p['id'], transaction=mock.ANY)
        mgr.delete_epg_for_network.assert_called_once_with(
            tenant, amap.SHADOW_PREFIX + l2p['id'],
            transaction=mock.ANY)
        # Both the implicit and the service contract are deleted.
        expected_calls = [
            mock.call(amap.IMPLICIT_PREFIX + l2p['id'], owner=tenant,
                      transaction=mock.ANY),
            mock.call(amap.SERVICE_PREFIX + l2p['id'], owner=tenant,
                      transaction=mock.ANY)]
        self._check_call_list(expected_calls,
                              mgr.delete_contract.call_args_list)
    def test_l2_policy_deleted_on_apic(self):
        """Delegate: L2P delete mapped to APIC, unshared."""
        self._test_l2_policy_deleted_on_apic()
    def test_l2_policy_deleted_on_apic_shared(self):
        """Delegate: L2P delete mapped to APIC, common tenant."""
        self._test_l2_policy_deleted_on_apic(shared=True)
    def test_pre_existing_subnets_added(self):
        """An L2P on an explicit network must adopt its existing subnets."""
        with self.network() as net:
            with self.subnet(network=net) as sub:
                sub = sub['subnet']
                l2p = self.create_l2_policy(
                    network_id=net['network']['id'])['l2_policy']
                mgr = self.driver.apic_manager
                # The pre-existing subnet's gateway/prefix is pushed to APIC.
                mgr.ensure_subnet_created_on_apic.assert_called_with(
                    l2p['tenant_id'], l2p['id'],
                    sub['gateway_ip'] + '/' + sub['cidr'].split('/')[1],
                    transaction=mock.ANY)
                ptg = self.create_policy_target_group(
                    l2_policy_id=l2p['id'])['policy_target_group']
                # The PTG inherits the pre-existing subnet.
                self.assertEqual(ptg['subnets'], [sub['id']])
class TestL3Policy(ApicMappingTestCase):
    """Tests mapping L3 policy lifecycle and external-segment plugging."""
    def _test_l3_policy_created_on_apic(self, shared=False):
        """L3P create must enforce a context (VRF) on APIC."""
        l3p = self.create_l3_policy(name="l3p", shared=shared)['l3_policy']
        tenant = self.common_tenant if shared else l3p['tenant_id']
        mgr = self.driver.apic_manager
        mgr.ensure_context_enforced.assert_called_once_with(
            tenant, l3p['id'])
    def test_l3_policy_created_on_apic(self):
        """Delegate: L3P create mapped to APIC, unshared."""
        self._test_l3_policy_created_on_apic()
    def test_l3_policy_created_on_apic_shared(self):
        """Delegate: L3P create mapped to APIC, common tenant."""
        self._test_l3_policy_created_on_apic(shared=True)
    def _test_l3_policy_deleted_on_apic(self, shared=False):
        """L3P delete must remove the context (VRF) from APIC."""
        l3p = self.create_l3_policy(name="l3p", shared=shared)['l3_policy']
        req = self.new_delete_request('l3_policies', l3p['id'], self.fmt)
        req.get_response(self.ext_api)
        tenant = self.common_tenant if shared else l3p['tenant_id']
        mgr = self.driver.apic_manager
        mgr.ensure_context_deleted.assert_called_once_with(
            tenant, l3p['id'])
    def test_l3_policy_deleted_on_apic(self):
        """Delegate: L3P delete mapped to APIC, unshared."""
        self._test_l3_policy_deleted_on_apic()
    def test_l3_policy_deleted_on_apic_shared(self):
        """Delegate: L3P delete mapped to APIC, common tenant."""
        self._test_l3_policy_deleted_on_apic(shared=True)
    def _test_one_l3_policy_per_es(self, shared_es=False):
        """At most one L3P may use a given external segment."""
        # Verify 2 L3P created on same ES fails
        es = self.create_external_segment(
            cidr='192.168.0.0/24', shared=shared_es)['external_segment']
        self.create_l3_policy(external_segments={es['id']: ['192.168.0.1']},
                              expected_res_status=201)
        res = self.create_l3_policy(
            external_segments={es['id']: ['192.168.0.2']},
            expected_res_status=400)
        self.assertEqual('OnlyOneL3PolicyIsAllowedPerExternalSegment',
                         res['NeutronError']['type'])
        # Verify existing L3P updated to use used ES fails
        sneaky_l3p = self.create_l3_policy()['l3_policy']
        res = self.update_l3_policy(
            sneaky_l3p['id'], expected_res_status=400,
            external_segments={es['id']: ['192.168.0.3']})
        self.assertEqual('OnlyOneL3PolicyIsAllowedPerExternalSegment',
                         res['NeutronError']['type'])
    def test_one_l3_policy_per_es(self):
        """Delegate: one-L3P-per-ES constraint, unshared ES."""
        self._test_one_l3_policy_per_es(shared_es=False)
    def test_one_l3_policy_per_es_shared(self):
        """Delegate: one-L3P-per-ES constraint, shared ES."""
        self._test_one_l3_policy_per_es(shared_es=True)
    def test_one_l3_policy_ip_on_es(self):
        """An L3P may claim only a single address on an external segment."""
        # Verify L3P created with more than 1 IP on ES fails
        es = self.create_external_segment(
            cidr='192.168.0.0/24')['external_segment']
        res = self.create_l3_policy(
            external_segments={es['id']: ['192.168.0.2', '192.168.0.3']},
            expected_res_status=400)
        self.assertEqual('OnlyOneAddressIsAllowedPerExternalSegment',
                         res['NeutronError']['type'])
        # Verify L3P updated to more than 1 IP on ES fails
        sneaky_l3p = self.create_l3_policy(
            external_segments={es['id']: ['192.168.0.2']},
            expected_res_status=201)['l3_policy']
        res = self.update_l3_policy(
            sneaky_l3p['id'], expected_res_status=400,
            external_segments={es['id']: ['192.168.0.2', '192.168.0.3']})
        self.assertEqual('OnlyOneAddressIsAllowedPerExternalSegment',
                         res['NeutronError']['type'])
    def _test_l3p_plugged_to_es_at_creation(self, shared_es, shared_l3p):
        """L3P created with an ES gets external network/profile/routes."""
        # Verify L3P is correctly plugged to ES on APIC during create
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='supported', cidr='192.168.0.0/24',
            shared=shared_es,
            external_routes=[{'destination': '0.0.0.0/0',
                              'nexthop': '192.168.0.254'},
                             {'destination': '128.0.0.0/16',
                              'nexthop': None}])['external_segment']
        # Create with explicit address
        l3p = self.create_l3_policy(
            shared=shared_l3p,
            tenant_id=es['tenant_id'] if not shared_es else 'another_tenant',
            external_segments={es['id']: ['192.168.0.3']},
            expected_res_status=201)['l3_policy']
        owner = self.common_tenant if shared_es else es['tenant_id']
        mgr = self.driver.apic_manager
        mgr.ensure_external_routed_network_created.assert_called_once_with(
            es['id'], owner=owner, context=l3p['id'],
            transaction=mock.ANY)
        mgr.ensure_logical_node_profile_created.assert_called_once_with(
            es['id'], mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE,
            mocked.APIC_EXT_PORT, mocked.APIC_EXT_ENCAP, '192.168.0.3/24',
            owner=owner, router_id=APIC_EXTERNAL_RID,
            transaction=mock.ANY)
        # NOTE(review): a None nexthop apparently falls back to 192.168.0.1
        # (the segment's first host address) — confirm against the driver.
        expected_route_calls = [
            mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.254',
                      owner=owner, subnet='0.0.0.0/0',
                      transaction=mock.ANY),
            mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
                      owner=owner, subnet='128.0.0.0/16',
                      transaction=mock.ANY)]
        self._check_call_list(expected_route_calls,
                              mgr.ensure_static_route_created.call_args_list)
    # Although the naming convention used here has been chosen poorly,
    # I'm separating the tests in order to get the mock re-set.
    def test_l3p_plugged_to_es_at_creation_1(self):
        """Delegate: shared ES, unshared L3P."""
        self._test_l3p_plugged_to_es_at_creation(shared_es=True,
                                                 shared_l3p=False)
    def test_l3p_plugged_to_es_at_creation_2(self):
        """Delegate: shared ES, shared L3P."""
        self._test_l3p_plugged_to_es_at_creation(shared_es=True,
                                                 shared_l3p=True)
    def test_l3p_plugged_to_es_at_creation_3(self):
        """Delegate: unshared ES, unshared L3P."""
        self._test_l3p_plugged_to_es_at_creation(shared_es=False,
                                                 shared_l3p=False)
    def _test_l3p_plugged_to_es_at_update(self, shared_es, shared_l3p):
        """L3P updated onto an ES gets external network/profile/routes."""
        # Verify L3P is correctly plugged to ES on APIC during update
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='supported', cidr='192.168.0.0/24',
            shared=shared_es,
            external_routes=[{'destination': '0.0.0.0/0',
                              'nexthop': '192.168.0.254'},
                             {'destination': '128.0.0.0/16',
                              'nexthop': None}])['external_segment']
        # Create with explicit address
        l3p = self.create_l3_policy(
            expected_res_status=201,
            tenant_id=es['tenant_id'] if not shared_es else 'another_tenant',
            shared=shared_l3p)['l3_policy']
        l3p = self.update_l3_policy(
            l3p['id'], tenant_id=l3p['tenant_id'], expected_res_status=200,
            external_segments={es['id']: ['192.168.0.3']})['l3_policy']
        mgr = self.driver.apic_manager
        owner = self.common_tenant if shared_es else es['tenant_id']
        mgr.ensure_external_routed_network_created.assert_called_once_with(
            es['id'], owner=owner, context=l3p['id'],
            transaction=mock.ANY)
        mgr.ensure_logical_node_profile_created.assert_called_once_with(
            es['id'], mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE,
            mocked.APIC_EXT_PORT, mocked.APIC_EXT_ENCAP, '192.168.0.3/24',
            owner=owner, router_id=APIC_EXTERNAL_RID,
            transaction=mock.ANY)
        expected_route_calls = [
            mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.254',
                      owner=owner, subnet='0.0.0.0/0',
                      transaction=mock.ANY),
            mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
                      owner=owner, subnet='128.0.0.0/16',
                      transaction=mock.ANY)]
        self._check_call_list(expected_route_calls,
                              mgr.ensure_static_route_created.call_args_list)
    # Although the naming convention used here has been chosen poorly,
    # I'm separating the tests in order to get the mock re-set.
    def test_l3p_plugged_to_es_at_update_1(self):
        """Delegate: shared ES, unshared L3P."""
        self._test_l3p_plugged_to_es_at_update(shared_es=True,
                                               shared_l3p=False)
    def test_l3p_plugged_to_es_at_update_2(self):
        """Delegate: shared ES, shared L3P."""
        self._test_l3p_plugged_to_es_at_update(shared_es=True,
                                               shared_l3p=True)
    def test_l3p_plugged_to_es_at_update_3(self):
        """Delegate: unshared ES, unshared L3P."""
        self._test_l3p_plugged_to_es_at_update(shared_es=False,
                                               shared_l3p=False)
    def _test_l3p_unplugged_from_es_on_delete(self, shared_es, shared_l3p):
        """Deleting an L3P must tear down its external routed networks."""
        self._mock_external_dict([('supported1', '192.168.0.2/24'),
                                  ('supported2', '192.168.1.2/24')])
        es1 = self.create_external_segment(
            name='supported1', cidr='192.168.0.0/24', shared=shared_es,
            external_routes=[{'destination': '0.0.0.0/0',
                              'nexthop': '192.168.0.254'},
                             {'destination': '128.0.0.0/16',
                              'nexthop': None}])['external_segment']
        es2 = self.create_external_segment(
            shared=shared_es, name='supported2',
            cidr='192.168.1.0/24')['external_segment']
        l3p = self.create_l3_policy(
            external_segments={es1['id']: ['192.168.0.3']}, shared=shared_l3p,
            tenant_id=es1['tenant_id'] if not shared_es else 'another_tenant',
            expected_res_status=201)['l3_policy']
        req = self.new_delete_request('l3_policies', l3p['id'], self.fmt)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
        mgr = self.driver.apic_manager
        owner = self.common_tenant if shared_es else es1['tenant_id']
        mgr.delete_external_routed_network.assert_called_once_with(
            es1['id'], owner=owner)
        mgr.delete_external_routed_network.reset_mock()
        # Verify correct deletion for 2 ESs
        l3p = self.create_l3_policy(
            shared=shared_l3p,
            tenant_id=es1['tenant_id'] if not shared_es else 'another_tenant',
            external_segments={es1['id']: ['192.168.0.3'],
                               es2['id']: ['192.168.1.3']},
            expected_res_status=201)['l3_policy']
        req = self.new_delete_request('l3_policies', l3p['id'], self.fmt)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
        expected_delete_calls = [
            mock.call(es1['id'], owner=owner),
            mock.call(es2['id'], owner=owner)]
        self._check_call_list(
            expected_delete_calls,
            mgr.delete_external_routed_network.call_args_list)
    # Although the naming convention used here has been chosen poorly,
    # I'm separating the tests in order to get the mock re-set.
    def test_l3p_unplugged_from_es_on_delete_1(self):
        """Delegate: shared ES, unshared L3P."""
        self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
                                                   shared_l3p=False)
    def test_l3p_unplugged_from_es_on_delete_2(self):
        """Delegate: shared ES, shared L3P."""
        self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
                                                   shared_l3p=True)
    def test_l3p_unplugged_from_es_on_delete_3(self):
        """Delegate: unshared ES, unshared L3P."""
        self._test_l3p_unplugged_from_es_on_delete(shared_es=False,
                                                   shared_l3p=False)
    def _test_l3p_unplugged_from_es_on_update(self, shared_es, shared_l3p):
        """Swapping/clearing an L3P's ESs must unplug the old ones on APIC."""
        self._mock_external_dict([('supported1', '192.168.0.2/24'),
                                  ('supported2', '192.168.1.2/24')])
        es1 = self.create_external_segment(
            name='supported1', cidr='192.168.0.0/24', shared=shared_es,
            external_routes=[{'destination': '0.0.0.0/0',
                              'nexthop': '192.168.0.254'},
                             {'destination': '128.0.0.0/16',
                              'nexthop': None}])['external_segment']
        es2 = self.create_external_segment(
            shared=shared_es,
            name='supported2', cidr='192.168.1.0/24')['external_segment']
        l3p = self.create_l3_policy(
            tenant_id=es1['tenant_id'] if not shared_es else 'another_tenant',
            shared=shared_l3p,
            external_segments={es1['id']: ['192.168.0.3']},
            expected_res_status=201)['l3_policy']
        mgr = self.driver.apic_manager
        owner = self.common_tenant if shared_es else es1['tenant_id']
        # Discard the calls made while plugging into es1.
        mgr.ensure_external_routed_network_created.reset_mock()
        mgr.ensure_logical_node_profile_created.reset_mock()
        mgr.ensure_static_route_created.reset_mock()
        l3p = self.update_l3_policy(
            l3p['id'], tenant_id=l3p['tenant_id'], expected_res_status=200,
            external_segments={es2['id']: ['192.168.1.3']})['l3_policy']
        # es1 unplugged, es2 plugged.
        mgr.delete_external_routed_network.assert_called_once_with(
            es1['id'], owner=owner)
        mgr.ensure_external_routed_network_created.assert_called_once_with(
            es2['id'], owner=owner, context=l3p['id'],
            transaction=mock.ANY)
        mgr.ensure_logical_node_profile_created.assert_called_once_with(
            es2['id'], mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE,
            mocked.APIC_EXT_PORT, mocked.APIC_EXT_ENCAP, '192.168.1.3/24',
            owner=owner, router_id=APIC_EXTERNAL_RID,
            transaction=mock.ANY)
        # es2 has no external routes configured.
        self.assertFalse(mgr.ensure_static_route_created.called)
        mgr.delete_external_routed_network.reset_mock()
        self.update_l3_policy(
            l3p['id'], expected_res_status=200, tenant_id=l3p['tenant_id'],
            external_segments={es1['id']: ['192.168.0.3'],
                               es2['id']: ['192.168.1.3']})
        # Clearing all ESs unplugs both.
        self.update_l3_policy(
            l3p['id'], tenant_id=l3p['tenant_id'],
            expected_res_status=200, external_segments={})
        expected_delete_calls = [
            mock.call(es1['id'], owner=owner),
            mock.call(es2['id'], owner=owner)]
        self._check_call_list(
            expected_delete_calls,
            mgr.delete_external_routed_network.call_args_list)
    # Although the naming convention used here has been chosen poorly,
    # I'm separating the tests in order to get the mock re-set.
    def test_l3p_unplugged_from_es_on_update_1(self):
        """Delegate: shared ES, unshared L3P."""
        self._test_l3p_unplugged_from_es_on_update(shared_es=True,
                                                   shared_l3p=False)
    def test_l3p_unplugged_from_es_on_update_2(self):
        """Delegate: shared ES, shared L3P."""
        self._test_l3p_unplugged_from_es_on_update(shared_es=True,
                                                   shared_l3p=True)
    def test_l3p_unplugged_from_es_on_update_3(self):
        """Delegate: unshared ES, unshared L3P."""
        self._test_l3p_unplugged_from_es_on_update(shared_es=False,
                                                   shared_l3p=False)
    def test_verify_unsupported_es_noop(self):
        """An ES not in the external dict must produce no APIC calls."""
        # Verify L3P is correctly plugged to ES on APIC during update
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='unsupported', cidr='192.168.0.0/24')['external_segment']
        self.create_l3_policy(
            external_segments={es['id']: ['192.168.0.3']},
            expected_res_status=201)
        mgr = self.driver.apic_manager
        self.assertFalse(mgr.ensure_external_routed_network_created.called)
        self.assertFalse(mgr.ensure_logical_node_profile_created.called)
        self.assertFalse(mgr.ensure_static_route_created.called)
    # NOTE(review): method name has a typo ("exposd" -> "exposed"); kept
    # as-is since renaming changes the discovered test id.
    def test_cidr_exposd(self):
        # Verify "cidr_exposed" configuration is assigned to L3P when no
        # explicit address is configured
        self._mock_external_dict([('supported1', '192.168.0.2/24'),
                                  ('supported2', '192.168.1.2/24')])
        es1 = self.create_external_segment(
            name='supported1', cidr='192.168.0.0/24')['external_segment']
        es2 = self.create_external_segment(
            name='supported2', cidr='192.168.1.0/24')['external_segment']
        l3p = self.create_l3_policy(
            external_segments={es1['id']: []},
            expected_res_status=201)['l3_policy']
        self.assertEqual(['192.168.0.2'], l3p['external_segments'][es1['id']])
        l3p = self.update_l3_policy(
            l3p['id'], expected_res_status=200,
            external_segments={es1['id']: [], es2['id']: []})['l3_policy']
        self.assertEqual(['192.168.0.2'], l3p['external_segments'][es1['id']])
        self.assertEqual(['192.168.1.2'], l3p['external_segments'][es2['id']])
        # Address IP changed
        l3p = self.update_l3_policy(
            l3p['id'], expected_res_status=200,
            external_segments={es1['id']: ['192.168.0.3'],
                               es2['id']: []})['l3_policy']
        self.assertEqual(['192.168.0.3'], l3p['external_segments'][es1['id']])
        self.assertEqual(['192.168.1.2'], l3p['external_segments'][es2['id']])
class TestPolicyRuleSet(ApicMappingTestCase):
    """Tests mapping policy rule sets onto APIC contracts and subjects."""
    # TODO(ivar): verify rule intersection with hierarchical PRS happens
    # on APIC
    def _test_policy_rule_set_created_on_apic(self, shared=False):
        """Creating a PRS must create a contract on APIC."""
        ct = self.create_policy_rule_set(name="ctr",
                                         shared=shared)['policy_rule_set']
        tenant = self.common_tenant if shared else ct['tenant_id']
        mgr = self.driver.apic_manager
        mgr.create_contract.assert_called_once_with(
            ct['id'], owner=tenant, transaction=mock.ANY)
    def test_policy_rule_set_created_on_apic(self):
        """Delegate: PRS create mapped to APIC, unshared."""
        self._test_policy_rule_set_created_on_apic()
    def test_policy_rule_set_created_on_apic_shared(self):
        """Delegate: PRS create mapped to APIC, common tenant."""
        self._test_policy_rule_set_created_on_apic(shared=True)
    def _test_policy_rule_set_created_with_rules(self, shared=False):
        """In/out/bi rules of a PRS map to contract subject filters."""
        bi, in_d, out = range(3)
        rules = self._create_3_direction_rules(shared=shared)
        # exclude BI rule for now
        ctr = self.create_policy_rule_set(
            name="ctr", policy_rules=[x['id'] for x in rules[1:]])[
                'policy_rule_set']
        rule_owner = self.common_tenant if shared else rules[0]['tenant_id']
        # Verify that the in-out rules are correctly enforced on the APIC
        mgr = self.driver.apic_manager
        expected_calls = [
            mock.call(ctr['id'], ctr['id'], rules[in_d]['id'],
                      owner=ctr['tenant_id'], transaction=mock.ANY,
                      unset=False, rule_owner=rule_owner),
            mock.call(ctr['id'], ctr['id'],
                      amap.REVERSE_PREFIX + rules[out]['id'],
                      owner=ctr['tenant_id'], transaction=mock.ANY,
                      unset=False, rule_owner=rule_owner)]
        self._check_call_list(
            expected_calls,
            mgr.manage_contract_subject_in_filter.call_args_list)
        expected_calls = [
            mock.call(ctr['id'], ctr['id'], rules[out]['id'],
                      owner=ctr['tenant_id'], transaction=mock.ANY,
                      unset=False, rule_owner=rule_owner),
            mock.call(ctr['id'], ctr['id'],
                      amap.REVERSE_PREFIX + rules[in_d]['id'],
                      owner=ctr['tenant_id'], transaction=mock.ANY,
                      unset=False, rule_owner=rule_owner)]
        self._check_call_list(
            expected_calls,
            mgr.manage_contract_subject_out_filter.call_args_list)
        # Create policy_rule_set with BI rule
        ctr = self.create_policy_rule_set(
            name="ctr", policy_rules=[rules[bi]['id']])['policy_rule_set']
        # NOTE(review): the call_happened_with(...) results below are not
        # asserted, and these attributes are plain mocks (not
        # MockCallRecorder as in _test_policy_rule_set_updated_with_new_rules
        # below), so these lines likely verify nothing — confirm intent.
        mgr.manage_contract_subject_in_filter.call_happened_with(
            ctr['id'], ctr['id'], rules[bi]['id'], owner=ctr['tenant_id'],
            transaction=mock.ANY, unset=False,
            rule_owner=rule_owner)
        mgr.manage_contract_subject_out_filter.call_happened_with(
            ctr['id'], ctr['id'], rules[bi]['id'], owner=ctr['tenant_id'],
            transaction=mock.ANY, unset=False,
            rule_owner=rule_owner)
        mgr.manage_contract_subject_in_filter.call_happened_with(
            ctr['id'], ctr['id'], amap.REVERSE_PREFIX + rules[bi]['id'],
            owner=ctr['tenant_id'], transaction=mock.ANY, unset=False,
            rule_owner=rule_owner)
        mgr.manage_contract_subject_out_filter.call_happened_with(
            ctr['id'], ctr['id'], amap.REVERSE_PREFIX + rules[bi]['id'],
            owner=ctr['tenant_id'], transaction=mock.ANY, unset=False,
            rule_owner=rule_owner)
    def test_policy_rule_set_created_with_rules(self):
        """Delegate: PRS with rules mapped to APIC, unshared."""
        self._test_policy_rule_set_created_with_rules()
    def test_policy_rule_set_created_with_rules_shared(self):
        """Delegate: PRS with rules mapped to APIC, shared rules."""
        self._test_policy_rule_set_created_with_rules(shared=True)
    def _test_policy_rule_set_updated_with_new_rules(self, shared=False):
        """Replacing PRS rules must unset old filters and set new ones."""
        bi, in_d, out = range(3)
        old_rules = self._create_3_direction_rules(shared=shared)
        new_rules = self._create_3_direction_rules(shared=shared)
        # exclude BI rule for now
        ctr = self.create_policy_rule_set(
            name="ctr",
            policy_rules=[x['id'] for x in old_rules[1:]])['policy_rule_set']
        data = {'policy_rule_set': {
            'policy_rules': [x['id'] for x in new_rules[1:]]}}
        rule_owner = (self.common_tenant if shared else
                      old_rules[in_d]['tenant_id'])
        mgr = self.driver.apic_manager
        # Use recorders so individual calls can be checked regardless of
        # ordering.
        mgr.manage_contract_subject_in_filter = MockCallRecorder()
        mgr.manage_contract_subject_out_filter = MockCallRecorder()
        self.new_update_request(
            'policy_rule_sets', data, ctr['id'], self.fmt).get_response(
                self.ext_api)
        # Verify old IN rule unset and new IN rule set
        self.assertTrue(
            mgr.manage_contract_subject_in_filter.call_happened_with(
                ctr['id'], ctr['id'], old_rules[in_d]['id'],
                rule_owner=rule_owner,
                owner=ctr['tenant_id'], transaction='transaction', unset=True))
        self.assertTrue(
            mgr.manage_contract_subject_in_filter.call_happened_with(
                ctr['id'], ctr['id'], new_rules[in_d]['id'],
                owner=ctr['tenant_id'], transaction='transaction',
                unset=False, rule_owner=rule_owner))
        self.assertTrue(
            mgr.manage_contract_subject_out_filter.call_happened_with(
                ctr['id'], ctr['id'], old_rules[out]['id'],
                owner=ctr['tenant_id'], transaction='transaction', unset=True,
                rule_owner=rule_owner))
        self.assertTrue(
            mgr.manage_contract_subject_out_filter.call_happened_with(
                ctr['id'], ctr['id'], new_rules[out]['id'],
                owner=ctr['tenant_id'], transaction='transaction',
                unset=False, rule_owner=rule_owner))
        ctr = self.create_policy_rule_set(
            name="ctr",
            policy_rules=[old_rules[0]['id']])['policy_rule_set']
        data = {'policy_rule_set': {'policy_rules': [new_rules[0]['id']]}}
        self.new_update_request(
            'policy_rule_sets', data, ctr['id'], self.fmt).get_response(
                self.ext_api)
        # Verify old BI rule unset and new BI rule set
        self.assertTrue(
            mgr.manage_contract_subject_in_filter.call_happened_with(
                ctr['id'], ctr['id'], old_rules[bi]['id'],
                owner=ctr['tenant_id'], transaction='transaction', unset=True,
                rule_owner=rule_owner))
        self.assertTrue(
            mgr.manage_contract_subject_out_filter.call_happened_with(
                ctr['id'], ctr['id'], old_rules[bi]['id'],
                owner=ctr['tenant_id'], transaction='transaction', unset=True,
                rule_owner=rule_owner))
        self.assertTrue(
            mgr.manage_contract_subject_in_filter.call_happened_with(
                ctr['id'], ctr['id'], new_rules[bi]['id'],
                owner=ctr['tenant_id'], transaction='transaction',
                unset=False, rule_owner=rule_owner))
        self.assertTrue(
            mgr.manage_contract_subject_out_filter.call_happened_with(
                ctr['id'], ctr['id'], new_rules[bi]['id'],
                owner=ctr['tenant_id'], transaction='transaction',
                unset=False, rule_owner=rule_owner))
    def test_policy_rule_set_updated_with_new_rules(self):
        """Delegate: PRS rule replacement mapped to APIC, unshared."""
        self._test_policy_rule_set_updated_with_new_rules()
    def test_policy_rule_set_updated_with_new_rules_shared(self):
        """Delegate: PRS rule replacement mapped to APIC, shared rules."""
        self._test_policy_rule_set_updated_with_new_rules(shared=True)
    def _create_3_direction_rules(self, shared=False):
        """Return [bi, in, out] allow rules on tcp/80 classifiers."""
        a1 = self.create_policy_action(name='a1',
                                       action_type='allow',
                                       shared=shared)['policy_action']
        cl_attr = {'protocol': 'tcp', 'port_range': 80}
        cls = []
        for direction in ['bi', 'in', 'out']:
            cls.append(self.create_policy_classifier(
                direction=direction, shared=shared,
                **cl_attr)['policy_classifier'])
        rules = []
        for classifier in cls:
            rules.append(self.create_policy_rule(
                policy_classifier_id=classifier['id'],
                policy_actions=[a1['id']],
                shared=shared)['policy_rule'])
        return rules
class TestPolicyRule(ApicMappingTestCase):
    def _test_policy_rule_created_on_apic(self, shared=False):
        """A policy rule must create forward (and reverse) APIC filters."""
        pr = self._create_simple_policy_rule('in', 'tcp', 88, shared=shared)
        tenant = self.common_tenant if shared else pr['tenant_id']
        mgr = self.driver.apic_manager
        # tcp/88 'in' rule: forward filter plus a reverse filter matching
        # established return traffic.
        expected_calls = [
            mock.call(pr['id'], owner=tenant, etherT='ip', prot='tcp',
                      dToPort=88, dFromPort=88, transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
                      etherT='ip', prot='tcp', sToPort=88, sFromPort=88,
                      tcpRules='est', transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.create_tenant_filter.call_args_list)
        mgr.reset_mock()
        # Protocol-less 'bi' rule: single unspecified-ethertype filter,
        # no reverse filter.
        pr = self._create_simple_policy_rule('bi', None, None, shared=shared)
        expected_calls = [
            mock.call(pr['id'], owner=tenant, etherT='unspecified',
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.create_tenant_filter.call_args_list)
    def test_policy_rule_created_on_apic(self):
        """Delegate: policy rule create mapped to APIC, unshared."""
        self._test_policy_rule_created_on_apic()
    def test_policy_rule_created_on_apic_shared(self):
        """Delegate: policy rule create mapped to APIC, common tenant."""
        self._test_policy_rule_created_on_apic(shared=True)
def test_policy_rule_many_actions_rejected(self):
actions = [self.create_policy_action(
action_type='allow')['policy_action']['id'] for x in range(2)]
cls = self.create_policy_classifier(direction='in', protocol='udp',
port_range=80)['policy_classifier']
self.create_policy_rule(policy_classifier_id=cls['id'],
expected_res_status=400,
policy_actions=actions)
    def _test_policy_rule_deleted_on_apic(self, shared=False):
        """Deleting a rule must delete both its APIC filters."""
        pr = self._create_simple_policy_rule(shared=shared)
        req = self.new_delete_request('policy_rules', pr['id'], self.fmt)
        req.get_response(self.ext_api)
        tenant = self.common_tenant if shared else pr['tenant_id']
        mgr = self.driver.apic_manager
        # Forward and reverse filters are both removed.
        expected_calls = [
            mock.call(pr['id'], owner=tenant, transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.delete_tenant_filter.call_args_list)
    def test_policy_rule_deleted_on_apic(self):
        """Delegate: policy rule delete mapped to APIC, unshared."""
        self._test_policy_rule_deleted_on_apic()
    def test_policy_rule_deleted_on_apic_shared(self):
        """Delegate: policy rule delete mapped to APIC, common tenant."""
        self._test_policy_rule_deleted_on_apic(shared=True)
    def test_policy_classifier_updated(self):
        # Updating a shared classifier must rebuild the APIC filters of every
        # policy rule that uses it, in each rule's owner tenant ('common' for
        # the shared rule pr1, 'test-tenant' for the local rule pr2).  When
        # the new protocol is "revertible" (tcp here), reverse filters are
        # also (re)created and the contract subjects are unset and re-set.
        pa = self.create_policy_action(
            action_type='allow', is_admin_context=True,
            tenant_id='admin', shared=True)['policy_action']
        pc = self.create_policy_classifier(
            direction='in', protocol='udp', port_range=80,
            shared=True, is_admin_context=True,
            tenant_id='admin')['policy_classifier']
        pr1 = self.create_policy_rule(
            policy_classifier_id=pc['id'], policy_actions=[pa['id']],
            shared=True, is_admin_context=True,
            tenant_id='admin')['policy_rule']
        pr2 = self.create_policy_rule(policy_classifier_id=pc['id'],
                                      policy_actions=[pa['id']])['policy_rule']
        prs1 = self.create_policy_rule_set(
            policy_rules=[pr1['id']])['policy_rule_set']
        prs2 = self.create_policy_rule_set(
            policy_rules=[pr2['id'], pr1['id']])['policy_rule_set']
        mgr = self.driver.apic_manager
        mgr.reset_mock()
        # Remove Classifier port, should just delete and create the filter
        self.update_policy_classifier(pc['id'], port_range=None,
                                      is_admin_context=True)
        expected_calls = [
            mock.call(pr1['id'], owner='common', etherT='ip', prot='udp',
                      transaction=mock.ANY),
            mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='udp',
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.create_tenant_filter.call_args_list)
        expected_calls = [
            mock.call(pr1['id'], owner='common', transaction=mock.ANY),
            mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
                      transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.delete_tenant_filter.call_args_list)
        self.assertFalse(mgr.manage_contract_subject_in_filter.called)
        self.assertFalse(mgr.manage_contract_subject_out_filter.called)
        mgr.reset_mock()
        # Change Classifier protocol, to not revertible
        self.update_policy_classifier(pc['id'], protocol='icmp',
                                      is_admin_context=True)
        expected_calls = [
            mock.call(pr1['id'], owner='common', etherT='ip', prot='icmp',
                      transaction=mock.ANY),
            mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='icmp',
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.create_tenant_filter.call_args_list)
        expected_calls = [
            mock.call(pr1['id'], owner='common', transaction=mock.ANY),
            mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
                      transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.delete_tenant_filter.call_args_list)
        self.assertFalse(mgr.manage_contract_subject_in_filter.called)
        self.assertFalse(mgr.manage_contract_subject_out_filter.called)
        mgr.reset_mock()
        # Change Classifier protocol to revertible
        self.update_policy_classifier(pc['id'], protocol='tcp',
                                      is_admin_context=True)
        expected_calls = [
            mock.call(pr1['id'], owner='common', transaction=mock.ANY),
            mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
                      transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.delete_tenant_filter.call_args_list)
        expected_calls = [
            mock.call(pr1['id'], owner='common', etherT='ip', prot='tcp',
                      transaction=mock.ANY),
            mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='tcp',
                      transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
                      etherT='ip', prot='tcp', tcpRules='est',
                      transaction=mock.ANY),
            mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
                      etherT='ip', prot='tcp', tcpRules='est',
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.create_tenant_filter.call_args_list)
        expected_calls = [
            # Unset PR1 and PR2 IN
            mock.call(prs1['id'], prs1['id'], pr1['id'], owner='test-tenant',
                      transaction=mock.ANY, unset=True, rule_owner='common'),
            mock.call(prs2['id'], prs2['id'], pr1['id'], owner='test-tenant',
                      transaction=mock.ANY, unset=True, rule_owner='common'),
            mock.call(prs2['id'], prs2['id'], pr2['id'], owner='test-tenant',
                      transaction=mock.ANY, unset=True,
                      rule_owner='test-tenant'),
            # SET PR1 and PR2 IN
            mock.call(prs1['id'], prs1['id'], pr1['id'], owner='test-tenant',
                      transaction=mock.ANY, unset=False, rule_owner='common'),
            mock.call(prs2['id'], prs2['id'], pr1['id'], owner='test-tenant',
                      transaction=mock.ANY, unset=False, rule_owner='common'),
            mock.call(prs2['id'], prs2['id'], pr2['id'], owner='test-tenant',
                      transaction=mock.ANY, unset=False,
                      rule_owner='test-tenant')
        ]
        self._check_call_list(
            expected_calls,
            mgr.manage_contract_subject_in_filter.call_args_list)
        # SET Reverse PR1 and PR2 OUT
        expected_calls = [
            mock.call(prs1['id'], prs1['id'], amap.REVERSE_PREFIX + pr1['id'],
                      owner='test-tenant', transaction=mock.ANY, unset=False,
                      rule_owner='common'),
            mock.call(prs2['id'], prs2['id'], amap.REVERSE_PREFIX + pr1['id'],
                      owner='test-tenant', transaction=mock.ANY, unset=False,
                      rule_owner='common'),
            mock.call(prs2['id'], prs2['id'], amap.REVERSE_PREFIX + pr2['id'],
                      owner='test-tenant', transaction=mock.ANY, unset=False,
                      rule_owner='test-tenant')
        ]
        self._check_call_list(
            expected_calls,
            mgr.manage_contract_subject_out_filter.call_args_list)
class TestExternalSegment(ApicMappingTestCase):
    """External-segment lifecycle tests for the APIC GBP mapping driver.

    Segments listed in the mocked external-network dictionary are
    'supported'; anything else must be a no-op on APIC.
    """
    def test_pat_rejected(self):
        # Port address translation is not supported by this driver: both
        # create and update must fail with PATNotSupportedByApicDriver.
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        # Verify Rejected on create
        res = self.create_external_segment(
            name='supported', port_address_translation=True,
            expected_res_status=400)
        self.assertEqual('PATNotSupportedByApicDriver',
                         res['NeutronError']['type'])
        # Verify Rejected on Update
        es = self.create_external_segment(
            name='supported', expected_res_status=201,
            port_address_translation=False)['external_segment']
        res = self.update_external_segment(
            es['id'], expected_res_status=400, port_address_translation=True)
        self.assertEqual('PATNotSupportedByApicDriver',
                         res['NeutronError']['type'])
    def _test_create(self, shared=False):
        # Both supported and unsupported segment names create successfully.
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        self.create_external_segment(name='supported', expected_res_status=201,
                                     shared=shared)
        self.create_external_segment(name='unsupport', expected_res_status=201,
                                     shared=shared)
    def test_create(self):
        self._test_create(False)
        self._test_create(True)
    def test_update_unsupported_noop(self):
        # Updating routes on a segment that is not in the mocked external
        # dictionary must not touch APIC at all.
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='unsupport', cidr='192.168.0.0/24',
            external_routes=[{'destination': '0.0.0.0/0',
                              'nexthop': '192.168.0.254'},
                             {'destination': '128.0.0.0/16',
                              'nexthop': None}],
            expected_res_status=201)['external_segment']
        self.update_external_segment(es['id'], expected_res_status=200,
                                     external_routes=[])
        mgr = self.driver.apic_manager
        self.assertFalse(mgr.ensure_static_route_deleted.called)
        self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
        self.assertFalse(mgr.ensure_static_route_created.called)
        self.assertFalse(mgr.ensure_external_epg_created.called)
        self.assertFalse(mgr.ensure_next_hop_deleted.called)
    def _test_route_update_remove(self, shared_es, shared_ep):
        # Verify routes are updated correctly on APIC when removed: dropping
        # a whole route deletes the static route and the per-policy EPG
        # routes; dropping only the nexthop replaces it with the default GW.
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='supported', cidr='192.168.0.0/24', shared=shared_es,
            external_routes=[{'destination': '0.0.0.0/0',
                              'nexthop': '192.168.0.254'},
                             {'destination': '128.0.0.0/16',
                              'nexthop': None}],
            expected_res_status=201)['external_segment']
        # Attach 3 external policies
        f = self.create_external_policy
        eps = [f(external_segments=[es['id']], shared=shared_ep,
                 tenant_id=es['tenant_id'] if not shared_es else 'another',
                 expected_res_status=201)['external_policy']
               for x in xrange(3)]
        mgr = self.driver.apic_manager
        owner = es['tenant_id'] if not shared_es else self.common_tenant
        mgr.ensure_external_epg_created.reset_mock()
        # Remove route completely
        self.update_external_segment(es['id'], expected_res_status=200,
                                     external_routes=[
                                         {'destination': '0.0.0.0/0',
                                          'nexthop': '192.168.0.254'}])
        mgr = self.driver.apic_manager
        mgr.ensure_static_route_deleted.assert_called_with(
            es['id'], mocked.APIC_EXT_SWITCH, '128.0.0.0/16',
            owner=owner, transaction=mock.ANY)
        expected_delete_calls = []
        for ep in eps:
            expected_delete_calls.append(
                mock.call(es['id'], subnets=['128.0.0.0/16'],
                          external_epg=ep['id'], owner=owner,
                          transaction=mock.ANY))
        self._check_call_list(
            expected_delete_calls,
            mgr.ensure_external_epg_routes_deleted.call_args_list)
        self.assertFalse(mgr.ensure_static_route_created.called)
        self.assertFalse(mgr.ensure_external_epg_created.called)
        self.assertFalse(mgr.ensure_next_hop_deleted.called)
        # Remove nexthop only
        mgr.ensure_static_route_deleted.reset_mock()
        mgr.ensure_external_epg_routes_deleted.reset_mock()
        self.update_external_segment(es['id'], expected_res_status=200,
                                     external_routes=[
                                         {'destination': '0.0.0.0/0',
                                          'nexthop': None}])
        mgr.ensure_next_hop_deleted.assert_called_with(
            es['id'], mocked.APIC_EXT_SWITCH, '0.0.0.0/0', '192.168.0.254',
            owner=owner, transaction=mock.ANY)
        # Being the new nexthop 'None', the default one is used
        mgr.ensure_static_route_created.assert_called_with(
            es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
            subnet='0.0.0.0/0', owner=owner, transaction=mock.ANY)
        expected_delete_calls = []
        for ep in eps:
            expected_delete_calls.append(
                mock.call(es['id'], subnet='0.0.0.0/0', external_epg=ep['id'],
                          owner=owner, transaction=mock.ANY))
        self._check_call_list(expected_delete_calls,
                              mgr.ensure_external_epg_created.call_args_list)
        self.assertFalse(mgr.ensure_static_route_deleted.called)
        self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
    # These variants run as separate test methods (rather than one
    # parameterized test) so that each case starts with freshly reset mocks.
    def test_route_update_remove_1(self):
        self._test_route_update_remove(shared_ep=True, shared_es=True)
    def test_route_update_remove_2(self):
        self._test_route_update_remove(shared_ep=False, shared_es=True)
    def test_route_update_remove_3(self):
        self._test_route_update_remove(shared_ep=False, shared_es=False)
    def _test_route_update_add(self, shared_es, shared_ep):
        # Verify routes are updated correctly on APIC when added: a new
        # route creates the static route plus one EPG entry per attached
        # policy; a route with nexthop None uses the default gateway.
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='supported', cidr='192.168.0.0/24', shared=shared_es,
            external_routes=[], expected_res_status=201)['external_segment']
        # Attach 3 external policies
        f = self.create_external_policy
        eps = [f(external_segments=[es['id']], shared=shared_ep,
                 tenant_id=es['tenant_id'] if not shared_es else 'another',
                 expected_res_status=201)['external_policy']
               for x in xrange(3)]
        mgr = self.driver.apic_manager
        mgr.ensure_external_epg_created.reset_mock()
        owner = es['tenant_id'] if not shared_es else self.common_tenant
        self.update_external_segment(es['id'], expected_res_status=200,
                                     external_routes=[
                                         {'destination': '128.0.0.0/16',
                                          'nexthop': '192.168.0.254'}])
        mgr.ensure_static_route_created.assert_called_with(
            es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.254',
            subnet='128.0.0.0/16', owner=owner, transaction=mock.ANY)
        expected_create_calls = []
        for ep in eps:
            expected_create_calls.append(
                mock.call(es['id'], subnet='128.0.0.0/16',
                          external_epg=ep['id'], owner=owner,
                          transaction=mock.ANY))
        self._check_call_list(expected_create_calls,
                              mgr.ensure_external_epg_created.call_args_list)
        self.assertFalse(mgr.ensure_static_route_deleted.called)
        self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
        self.assertFalse(mgr.ensure_next_hop_deleted.called)
        mgr.ensure_static_route_created.reset_mock()
        mgr.ensure_external_epg_created.reset_mock()
        # Verify Route added with default gateway
        self.update_external_segment(es['id'], expected_res_status=200,
                                     external_routes=[
                                         {'destination': '128.0.0.0/16',
                                          'nexthop': '192.168.0.254'},
                                         {'destination': '0.0.0.0/0',
                                          'nexthop': None}])
        mgr.ensure_static_route_created.assert_called_with(
            es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
            subnet='0.0.0.0/0', owner=owner, transaction=mock.ANY)
        expected_create_calls = []
        for ep in eps:
            expected_create_calls.append(
                mock.call(es['id'], subnet='0.0.0.0/0',
                          external_epg=ep['id'], owner=owner,
                          transaction=mock.ANY))
        self._check_call_list(expected_create_calls,
                              mgr.ensure_external_epg_created.call_args_list)
        self.assertFalse(mgr.ensure_static_route_deleted.called)
        self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
        self.assertFalse(mgr.ensure_next_hop_deleted.called)
    # These variants run as separate test methods (rather than one
    # parameterized test) so that each case starts with freshly reset mocks.
    def test_route_update_add_1(self):
        self._test_route_update_add(shared_ep=True, shared_es=True)
    def test_route_update_add_2(self):
        self._test_route_update_add(shared_ep=False, shared_es=True)
    def test_route_update_add_3(self):
        self._test_route_update_add(shared_ep=False, shared_es=False)
class TestExternalPolicy(ApicMappingTestCase):
    def test_creation_noop(self):
        # An external policy on a segment with no routes, or on a segment
        # missing from the mocked external dict, must not create any
        # external EPG on APIC.
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='supported', cidr='192.168.0.0/24',
            external_routes=[], expected_res_status=201)['external_segment']
        self.create_external_policy(
            external_segments=[es['id']], expected_res_status=201)
        # Verify not called since no routes are set
        mgr = self.driver.apic_manager
        self.assertFalse(
            mgr.ensure_external_epg_created.called,
            msg='calls:\n%s' %
            str(mgr.ensure_external_epg_created.call_args_list))
        es = self.create_external_segment(
            name='unsupported', cidr='192.168.0.0/24', expected_res_status=201,
            external_routes=[{'destination': '128.0.0.0/16',
                              'nexthop': '192.168.0.254'}])['external_segment']
        self.create_external_policy(
            external_segments=[es['id']], expected_res_status=201,)
        # Verify noop on unsupported
        self.assertFalse(mgr.ensure_external_epg_created.called)
    def _test_creation_no_prs(self, shared_es, shared_ep):
        # Creating a policy linked to routed segments must create one
        # external EPG per segment, owned by the segment tenant (the common
        # tenant when the segments are shared).
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es_list = [
            self.create_external_segment(
                name='supported', cidr='192.168.0.0/24', shared=shared_es,
                expected_res_status=201,
                external_routes=[{
                    'destination': '128.0.0.0/16',
                    'nexthop': '192.168.0.254'}])['external_segment']
            for x in range(3)]
        ep = self.create_external_policy(
            external_segments=[x['id'] for x in es_list], shared=shared_ep,
            tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
            expected_res_status=201)['external_policy']
        mgr = self.driver.apic_manager
        owner = (es_list[0]['tenant_id'] if not shared_es
                 else self.common_tenant)
        expected_create_calls = []
        for es in es_list:
            expected_create_calls.append(
                mock.call(es['id'], subnet='128.0.0.0/16',
                          external_epg=ep['id'], owner=owner,
                          transaction=mock.ANY))
        self._check_call_list(expected_create_calls,
                              mgr.ensure_external_epg_created.call_args_list)
    # These variants run as separate test methods (rather than a single
    # parameterized test) so that each case starts with freshly reset mocks.
    def test_creation_no_prs_1(self):
        # Shared segments and shared policy.
        self._test_creation_no_prs(shared_ep=True, shared_es=True)
    def test_creation_no_prs_2(self):
        # Shared segments, local policy.
        self._test_creation_no_prs(shared_ep=False, shared_es=True)
    def test_creation_no_prs_3(self):
        # Everything tenant-local.
        self._test_creation_no_prs(shared_ep=False, shared_es=False)
    def _test_update_no_prs(self, shared_es, shared_ep):
        # Adding segments on update creates one external EPG per segment;
        # removing them afterwards deletes each EPG again.
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es_list = [
            self.create_external_segment(
                name='supported', cidr='192.168.0.0/24', shared=shared_es,
                expected_res_status=201,
                external_routes=[{
                    'destination': '128.0.0.0/16',
                    'nexthop': '192.168.0.254'}])['external_segment']
            for x in range(3)]
        ep = self.create_external_policy(
            tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
            shared=shared_ep, expected_res_status=201)['external_policy']
        ep = self.update_external_policy(
            ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
            external_segments=[x['id'] for x in es_list])['external_policy']
        mgr = self.driver.apic_manager
        owner = (es_list[0]['tenant_id'] if not shared_es
                 else self.common_tenant)
        expected_create_calls = []
        for es in es_list:
            expected_create_calls.append(
                mock.call(es['id'], subnet='128.0.0.0/16',
                    external_epg=ep['id'], owner=owner, transaction=mock.ANY))
        self._check_call_list(expected_create_calls,
                              mgr.ensure_external_epg_created.call_args_list)
        ep = self.update_external_policy(
            ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
            external_segments=[])['external_policy']
        mgr = self.driver.apic_manager
        expected_create_calls = []
        for es in es_list:
            expected_create_calls.append(
                mock.call(es['id'], owner=owner, external_epg=ep['id']))
        self._check_call_list(expected_create_calls,
                              mgr.ensure_external_epg_deleted.call_args_list)
    # These variants run as separate test methods (rather than a single
    # parameterized test) so that each case starts with freshly reset mocks.
    def test_update_no_prs_1(self):
        # Shared segments and shared policy.
        self._test_update_no_prs(shared_ep=True, shared_es=True)
    def test_update_no_prs_2(self):
        # Shared segments, local policy.
        self._test_update_no_prs(shared_ep=False, shared_es=True)
    def test_update_no_prs_3(self):
        # Everything tenant-local.
        self._test_update_no_prs(shared_ep=False, shared_es=False)
    def _test_create_with_prs(self, shared_es, shared_ep, shared_prs):
        # Creating a policy that provides/consumes policy rule sets must set
        # the corresponding contract on every segment's external EPG, once
        # per direction (provided=True / provided=False).
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es_list = [
            self.create_external_segment(
                name='supported', cidr='192.168.0.0/24', shared=shared_es,
                expected_res_status=201,
                external_routes=[{
                    'destination': '128.0.0.0/16',
                    'nexthop': '192.168.0.254'}])['external_segment']
            for x in range(3)]
        prov = self._create_policy_rule_set_on_shared(
            shared=shared_prs,
            tenant_id=es_list[0]['tenant_id'] if not (
                shared_es | shared_prs) else 'another')
        cons = self._create_policy_rule_set_on_shared(
            shared=shared_prs,
            tenant_id=es_list[0]['tenant_id'] if not (
                shared_es | shared_prs) else 'another')
        ep = self.create_external_policy(
            provided_policy_rule_sets={prov['id']: ''},
            consumed_policy_rule_sets={cons['id']: ''}, shared=shared_ep,
            tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
            external_segments=[x['id'] for x in es_list],
            expected_res_status=201)['external_policy']
        mgr = self.driver.apic_manager
        owner = (es_list[0]['tenant_id'] if not shared_es
                 else self.common_tenant)
        expected_calls = []
        for es in es_list:
            expected_calls.append(
                mock.call(es['id'], prov['id'], external_epg=ep['id'],
                          provided=True, owner=owner,
                          transaction=mock.ANY))
            expected_calls.append(
                mock.call(es['id'], cons['id'], external_epg=ep['id'],
                          provided=False, owner=owner,
                          transaction=mock.ANY))
        self._check_call_list(expected_calls,
                              mgr.set_contract_for_external_epg.call_args_list)
    # These variants run as separate test methods (rather than a single
    # parameterized test) so that each case starts with freshly reset mocks.
    def test_create_with_prs_1(self):
        # Everything shared.
        self._test_create_with_prs(shared_es=True, shared_ep=True,
                                   shared_prs=True)
    def test_create_with_prs_2(self):
        # Shared segments and rule sets, local policy.
        self._test_create_with_prs(shared_es=True, shared_ep=False,
                                   shared_prs=True)
    def test_create_with_prs_3(self):
        # Shared segments only.
        self._test_create_with_prs(shared_es=True, shared_ep=False,
                                   shared_prs=False)
    def test_create_with_prs_4(self):
        # Everything tenant-local.
        self._test_create_with_prs(shared_es=False, shared_ep=False,
                                   shared_prs=False)
    def test_create_with_prs_5(self):
        # Shared rule sets only.
        self._test_create_with_prs(shared_es=False, shared_ep=False,
                                   shared_prs=True)
    def _test_update_add_prs(self, shared_es, shared_ep, shared_prs):
        # Adding policy rule sets on update sets contracts on every segment
        # EPG; clearing them afterwards unsets the same contracts.
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es_list = [
            self.create_external_segment(
                name='supported', cidr='192.168.0.0/24', shared=shared_es,
                expected_res_status=201,
                external_routes=[{
                    'destination': '128.0.0.0/16',
                    'nexthop': '192.168.0.254'}])['external_segment']
            for x in range(3)]
        prov = self._create_policy_rule_set_on_shared(
            shared=shared_prs, tenant_id=es_list[0]['tenant_id'] if not (
                shared_es | shared_prs) else 'another')
        cons = self._create_policy_rule_set_on_shared(
            shared=shared_prs, tenant_id=es_list[0]['tenant_id'] if not (
                shared_es | shared_prs) else 'another')
        ep = self.create_external_policy(
            external_segments=[x['id'] for x in es_list], shared=shared_ep,
            tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
            expected_res_status=201)['external_policy']
        ep = self.update_external_policy(
            ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
            provided_policy_rule_sets={prov['id']: ''},
            consumed_policy_rule_sets={cons['id']: ''})['external_policy']
        mgr = self.driver.apic_manager
        owner = (es_list[0]['tenant_id'] if not shared_es
                 else self.common_tenant)
        expected_calls = []
        for es in es_list:
            expected_calls.append(
                mock.call(es['id'], prov['id'], external_epg=ep['id'],
                          provided=True, owner=owner, transaction=mock.ANY))
            expected_calls.append(
                mock.call(es['id'], cons['id'], external_epg=ep['id'],
                          provided=False, owner=owner, transaction=mock.ANY))
        self._check_call_list(expected_calls,
                              mgr.set_contract_for_external_epg.call_args_list)
        ep = self.update_external_policy(
            ep['id'], expected_res_status=200, provided_policy_rule_sets={},
            consumed_policy_rule_sets={},
            tenant_id=ep['tenant_id'])['external_policy']
        expected_calls = []
        for es in es_list:
            expected_calls.append(
                mock.call(es['id'], prov['id'], external_epg=ep['id'],
                          provided=True, owner=owner, transaction=mock.ANY))
            expected_calls.append(
                mock.call(es['id'], cons['id'], external_epg=ep['id'],
                          provided=False, owner=owner, transaction=mock.ANY))
        self._check_call_list(
            expected_calls, mgr.unset_contract_for_external_epg.call_args_list)
    # These variants run as separate test methods (rather than a single
    # parameterized test) so that each case starts with freshly reset mocks.
    def test_update_add_prs_1(self):
        # Everything shared.
        self._test_update_add_prs(shared_es=True, shared_ep=True,
                                  shared_prs=True)
    def test_update_add_prs_2(self):
        # Shared segments and rule sets, local policy.
        self._test_update_add_prs(shared_es=True, shared_ep=False,
                                  shared_prs=True)
    def test_update_add_prs_3(self):
        # Shared segments only.
        self._test_update_add_prs(shared_es=True, shared_ep=False,
                                  shared_prs=False)
    def test_update_add_prs_4(self):
        # Everything tenant-local.
        self._test_update_add_prs(shared_es=False, shared_ep=False,
                                  shared_prs=False)
    def test_update_add_prs_5(self):
        # Shared rule sets only.
        self._test_update_add_prs(shared_es=False, shared_ep=False,
                                  shared_prs=True)
    def test_update_add_prs_unsupported(self):
        # Segments absent from the mocked external dict are unsupported:
        # attaching rule sets to a policy on such a segment must not set
        # any contract on APIC.
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='unsupported', cidr='192.168.0.0/24', expected_res_status=201,
            external_routes=[{'destination': '128.0.0.0/16',
                              'nexthop': '192.168.0.254'}])['external_segment']
        prov = self._create_policy_rule_set_on_shared()
        cons = self._create_policy_rule_set_on_shared()
        ep = self.create_external_policy(
            external_segments=[es['id']],
            expected_res_status=201)['external_policy']
        self.update_external_policy(
            ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
            provided_policy_rule_sets={prov['id']: ''},
            consumed_policy_rule_sets={cons['id']: ''})['external_policy']
        mgr = self.driver.apic_manager
        self.assertFalse(mgr.set_contract_for_external_epg.called)
| |
r"""PyPFASST IMEX SDC and FEval classes. These classes are used for
ODEs of the form
.. math::
\frac{d}{dt} y(x,t) = f(y,t) = f_1(y,t) + f_2(y,t) + b(t)
where the solver treats the :math:`f_1` piece explicitly, and the
:math:`f_2` piece implicitly.
"""
# Copyright (c) 2011, Matthew Emmett. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import feval
import sdc
class IMEXFEval(feval.FEval):
    r"""IMEX function evaluation base class.
    Methods in this class should be overridden with problem specific
    evaluators.
    Attributes:
    .. attribute:: f1_evaluate
       Evaluate explicit piece. Called as
       >>> f1_evaluate(q, t, f1, **kwargs)
       The result should be stored in *f1*.
    .. attribute:: f2_evaluate
       Evaluate implicit piece. Called as
       >>> f2_evaluate(q, t, f2, **kwargs)
       The result should be stored in *f2*.
    .. attribute:: f2_solve
       Solve and evaluate implicit piece. Called as
       >>> f2_solve(rhs, q, t, dt, f2, **kwargs)
       The implicit solution of :math:`q - \Delta t f_2(q) =
       \text{RHS}` should be stored in *q*. The value of
       :math:`f_2(q)` should be stored in *f2*.
    Note that by omitting *f1eval* or both of *f2eval* and *f2solv*,
    this becomes a purely implicit or explicit evaluator, respectively.
    See also :py:class:`pfasst.feval.FEval`.
    """
    @property
    def pieces(self):
        # Number of function pieces this evaluator handles: 2 for a full
        # IMEX split, 1 for a purely explicit or purely implicit evaluator.
        f1eval = hasattr(self, 'f1_evaluate')
        f2eval = hasattr(self, 'f2_evaluate')
        f2solv = hasattr(self, 'f2_solve')
        if not (f1eval or f2eval or f2solv):
            raise ValueError('none of f1eval, f2eval, or f2solv are defined')
        if f1eval and f2eval and f2solv:
            return 2
        elif f1eval and not (f2eval or f2solv):
            return 1
        elif not f1eval and (f2eval and f2solv):
            return 1
        else:
            # Partial implicit definitions (e.g. f2_evaluate without
            # f2_solve, with or without f1_evaluate) are rejected.
            raise ValueError('feval is inconsistent')
class IMEXSDC(sdc.SDC):
    r"""IMEXSDC (implicit/explicit SDC) class.
    :param qtype: quadrature type
    :param nodes: number of quadrature nodes
    The SDC class performs implicit/explicit SDC sweeps and adds FAS
    corrections if supplied.
    Currently supported quadrature types are:
    * ``'GL'``: Gauss-Lobatto
    * ``'CC'``: Clenshaw-Curtis
    """
    def __init__(self, *args, **kwargs):
        super(IMEXSDC, self).__init__(*args, **kwargs)
        # construct implicit and explicit integration matrices by removing
        # the forward-Euler (explicit) / backward-Euler (implicit) terms
        # from the spectral integration matrix
        smat_exp = self.smat.copy()
        smat_imp = self.smat.copy()
        # spacings between consecutive quadrature nodes
        dsdc = self.nodes[1:] - self.nodes[:-1]
        for m in range(self.nnodes-1):
            smat_exp[m,m] = smat_exp[m,m] - dsdc[m]
            smat_imp[m,m+1] = smat_imp[m,m+1] - dsdc[m]
        self.smat_exp = smat_exp
        self.smat_imp = smat_imp
        self.dsdc = self.nodes[1:] - self.nodes[:-1]
    ###############################################################################
    def evaluate(self, t0, qSDC, fSDC, node, feval, **kwargs):
        """Evaluate the function pieces at *node* ('all' for every node).
        Results are stored into *fSDC*: row 0 holds the explicit piece
        (or the only piece, for single-piece evaluators), row 1 the
        implicit piece for full IMEX evaluators.
        NOTE(review): every evaluation is performed at time *t0*, not at
        the per-node times -- fine for autonomous f; confirm for
        time-dependent right-hand sides.
        """
        nnodes = fSDC.shape[1]
        f1eval = hasattr(feval, 'f1_evaluate')
        f2eval = hasattr(feval, 'f2_evaluate')
        if node == 'all':
            for m in range(nnodes):
                self.pf.state.node = m
                self.level.call_hooks('pre-feval', **kwargs)
                if f1eval and f2eval:
                    feval.f1_evaluate(qSDC[m], t0, fSDC[0, m], **kwargs)
                    feval.f2_evaluate(qSDC[m], t0, fSDC[1, m], **kwargs)
                elif f1eval:
                    feval.f1_evaluate(qSDC[m], t0, fSDC[0, m], **kwargs)
                else:
                    feval.f2_evaluate(qSDC[m], t0, fSDC[0, m], **kwargs)
                self.level.call_hooks('post-feval', **kwargs)
        else:
            self.pf.state.node = node
            self.level.call_hooks('pre-feval', **kwargs)
            if f1eval and f2eval:
                feval.f1_evaluate(qSDC[node], t0, fSDC[0, node], **kwargs)
                feval.f2_evaluate(qSDC[node], t0, fSDC[1, node], **kwargs)
            elif f1eval:
                feval.f1_evaluate(qSDC[node], t0, fSDC[0, node], **kwargs)
            else:
                feval.f2_evaluate(qSDC[node], t0, fSDC[0, node], **kwargs)
            self.level.call_hooks('post-feval', **kwargs)
    ###############################################################################
    def sweep(self, t0, dt, F, **kwargs):
        r"""Perform one SDC sweep.
        Note that *qSDC* and *fSDC* are over-written.
        The sweep performed uses forward/backward Euler time-stepping.
        """
        exp = self.smat_exp
        imp = self.smat_imp
        qSDC = F.qSDC
        fSDC = F.fSDC
        feval = F.feval
        pieces = fSDC.shape[0]
        nnodes = fSDC.shape[1]
        shape = fSDC.shape[2:]
        size = feval.size
        f1eval = hasattr(feval, 'f1_evaluate')
        f2eval = hasattr(feval, 'f2_evaluate')
        F.call_hooks('pre-sweep', **kwargs)
        # flatten so we can use np.dot
        fSDCf = fSDC.reshape((pieces, nnodes, size))
        # integrate f: build the quadrature part of the right-hand side
        # from the explicit/implicit integration matrices
        if f1eval and f2eval:
            rhs = dt * (np.dot(exp, fSDCf[0]) + np.dot(imp, fSDCf[1]))
        elif f1eval:
            rhs = dt * np.dot(exp, fSDCf[0])
        else:
            rhs = dt * np.dot(imp, fSDCf[0])
        rhs = rhs.reshape((nnodes-1,)+shape)
        # add tau (FAS correction, when sweeping on a coarse level)
        if F.tau is not None:
            rhs += F.tau
        # set initial condition and eval
        qSDC[0] = F.q0
        self.evaluate(t0, qSDC, fSDC, 0, feval, **kwargs)
        if F.gSDC is not None:
            fSDC[0,0] += F.gSDC[0]
        # sub time-stepping: forward Euler on the explicit piece,
        # backward Euler (f2_solve) on the implicit piece
        t = t0
        dtsdc = dt * self.dsdc
        for m in range(self.nnodes-1):
            t += dtsdc[m]
            self.pf.state.node = m + 1
            self.level.call_hooks('pre-feval', **kwargs)
            if f1eval and f2eval:
                # imex
                q1 = qSDC[m] + dtsdc[m]*fSDC[0,m] + rhs[m]
                feval.f2_solve(q1, qSDC[m+1], t, dtsdc[m], fSDC[1,m+1], **kwargs)
                feval.f1_evaluate(qSDC[m+1], t, fSDC[0,m+1], **kwargs)
            elif f1eval:
                # explicit
                qSDC[m+1] = qSDC[m] + dtsdc[m]*fSDC[0,m] + rhs[m]
                feval.f1_evaluate(qSDC[m+1], t, fSDC[0,m+1], **kwargs)
            else:
                # implicit
                q1 = qSDC[m] + dtsdc[m]*fSDC[0,m] + rhs[m]
                feval.f2_solve(q1, qSDC[m+1], t, dtsdc[m], fSDC[0,m+1], **kwargs)
            self.level.call_hooks('post-feval', **kwargs)
            if F.gSDC is not None:
                fSDC[0,m+1] += F.gSDC[m+1]
        F.qend[...] = F.qSDC[-1]
        self.pf.state.node = -1
        F.call_hooks('post-sweep', **kwargs)
| |
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Actions to take on resources
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import base64
from datetime import datetime
import jmespath
import logging
import zlib
import six
from botocore.exceptions import ClientError
from c7n.executor import ThreadPoolExecutor
from c7n.manager import resources
from c7n.registry import PluginRegistry
from c7n.resolver import ValuesFrom
from c7n import utils
from c7n.version import version as VERSION
def average(numbers):
    """Return the arithmetic mean of *numbers*, or 0.0 for an empty sequence."""
    if not numbers:
        return 0.0
    return sum(numbers) / float(len(numbers))
def distinct_count(values):
    """Return the number of unique elements in *values*, as a float."""
    unique = set(values)
    return float(len(unique))
# Aggregation operations available when emitting metrics from resource data.
METRIC_OPS = {
    'count': len,
    'distinct_count': distinct_count,
    'sum': sum,
    'average': average,
}

# Unit names accepted for metric data points (CloudWatch-style unit enum).
METRIC_UNITS = [
    # Time
    'Seconds',
    'Microseconds',
    'Milliseconds',
    # Bytes and Bits
    'Bytes',
    'Kilobytes',
    'Megabytes',
    'Gigabytes',
    'Terabytes',
    'Bits',
    'Kilobits',
    'Megabits',
    'Gigabits',
    'Terabits',
    # Rates
    'Bytes/Second',
    'Kilobytes/Second',
    'Megabytes/Second',
    'Gigabytes/Second',
    'Terabytes/Second',
    'Bits/Second',
    'Kilobits/Second',
    'Megabits/Second',
    'Gigabits/Second',
    'Terabits/Second',
    'Count/Second',
    # Other Scalars
    'Percent',
    'Count',
    'None'
]
class ActionRegistry(PluginRegistry):
    """Registry of action plugins, pre-seeded with the generic actions."""

    def __init__(self, *args, **kw):
        super(ActionRegistry, self).__init__(*args, **kw)
        # Generic actions available to every resource type.
        self.register('notify', Notify)
        self.register('invoke-lambda', LambdaInvoke)
        self.register('put-metric', PutMetric)

    def parse(self, data, manager):
        """Instantiate one action per entry in *data*."""
        return [self.factory(d, manager) for d in data]

    def factory(self, data, manager):
        """Build and validate a single action from its config entry.

        An entry is either a bare type-name string or a mapping with a
        'type' key; unknown or missing types raise ValueError.
        """
        if not isinstance(data, dict):
            action_type = data
            data = {}
        else:
            action_type = data.get('type')
            if action_type is None:
                raise ValueError(
                    "Invalid action type found in %s" % (data))
        action_class = self.get(action_type)
        if action_class is None:
            raise ValueError(
                "Invalid action type %s, valid actions %s" % (
                    action_type, list(self.keys())))
        # Construct a ResourceManager
        return action_class(data, manager).validate()
class Action(object):
    """Base class for all policy actions.

    Subclasses implement :meth:`process` and may declare ``schema``,
    ``permissions`` and ``metrics``.
    """

    metrics = ()
    log = logging.getLogger("custodian.actions")
    executor_factory = ThreadPoolExecutor
    # Fix: the original declared ``permissions`` twice; one declaration kept.
    permissions = ()
    schema = {'type': 'object'}

    def __init__(self, data=None, manager=None, log_dir=None):
        self.data = data or {}
        self.manager = manager
        self.log_dir = log_dir

    def get_permissions(self):
        """Return the IAM permissions required to run this action."""
        return self.permissions

    def validate(self):
        """Hook for schema/semantic validation; returns self for chaining."""
        return self

    @property
    def name(self):
        """Action name derived from the class name, lowercased."""
        return self.__class__.__name__.lower()

    def process(self, resources):
        raise NotImplementedError(
            "Base action class does not implement behavior")

    def _run_api(self, cmd, *args, **kw):
        """Invoke *cmd*, treating a successful DryRun probe as a no-op.

        A ``DryRunOperation`` ClientError with HTTP 412 and a 'would have
        succeeded' message means the call is permitted; log it and return
        None instead of raising. Any other error propagates.
        """
        try:
            return cmd(*args, **kw)
        except ClientError as e:
            if (e.response['Error']['Code'] == 'DryRunOperation' and
                    e.response['ResponseMetadata']['HTTPStatusCode'] == 412 and
                    'would have succeeded' in e.response['Error']['Message']):
                return self.log.info(
                    "Dry run operation %s succeeded" % (
                        self.__class__.__name__.lower()))
            raise
# Backwards-compatible alias; existing code subclasses/imports ``BaseAction``.
BaseAction = Action
class ModifyVpcSecurityGroupsAction(Action):
    """Common actions for modifying security groups on a resource

    Can target either physical groups as a list of group ids or
    symbolic groups like 'matched' or 'all'. 'matched' uses
    the annotations of the 'security-group' interface filter.

    Note an interface always gets at least one security group, so
    we mandate the specification of an isolation/quarantine group
    that can be specified if there would otherwise be no groups.

    type: modify-security-groups
        add: []
        remove: [] | matched
        isolation-group: sg-xyz
    """
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'type': {'enum': ['modify-security-groups']},
            'add': {'oneOf': [
                {'type': 'string', 'pattern': '^sg-*'},
                {'type': 'array', 'items': {
                    'pattern': '^sg-*',
                    'type': 'string'}}]},
            'remove': {'oneOf': [
                {'type': 'array', 'items': {
                    'type': 'string', 'pattern': '^sg-*'}},
                {'enum': [
                    'matched', 'all',
                    {'type': 'string', 'pattern': '^sg-*'}]}]},
            'isolation-group': {'oneOf': [
                {'type': 'string', 'pattern': '^sg-*'},
                {'type': 'array', 'items': {
                    'type': 'string', 'pattern': '^sg-*'}}]}},
        'anyOf': [
            {'required': ['isolation-group', 'remove', 'type']},
            {'required': ['add', 'remove', 'type']},
            {'required': ['add', 'type']}]
    }

    def get_groups(self, resources, metadata_key=None):
        """Parse policies to get lists of security groups to attach to each resource

        For each input resource, parse the various add/remove/isolation-
        group policies for 'modify-security-groups' to find the resulting
        set of VPC security groups to attach to that resource.

        The 'metadata_key' parameter can be used for two purposes at
        the moment; The first use is for resources' APIs that return a
        list of security group IDs but use a different metadata key
        than 'Groups' or 'SecurityGroups'.

        The second use is for when there are richer objects in the 'Groups' or
        'SecurityGroups' lists. The custodian actions need to act on lists of
        just security group IDs, so the metadata_key can be used to select IDs
        from the richer objects in the provided lists.

        Returns a list of lists containing the resulting VPC security groups
        that should end up on each resource passed in.

        :param resources: List of resources containing VPC Security Groups
        :param metadata_key: Metadata key for security groups list
        :return: List of lists of security groups per resource
        """
        add_target_group_ids = self.data.get('add', None)
        remove_target_group_ids = self.data.get('remove', None)
        isolation_group = self.data.get('isolation-group')
        return_groups = []
        for r in resources:
            # Reset per resource so group ids from a previous iteration can
            # never leak into (or crash on) a resource with no group metadata.
            rgroups = []
            add_groups = []
            remove_groups = []
            if r.get('Groups'):
                if metadata_key and isinstance(r['Groups'][0], dict):
                    # Fix: previously read r['SecurityGroups'] in this branch,
                    # which raised KeyError for resources using 'Groups'.
                    rgroups = [g[metadata_key] for g in r['Groups']]
                else:
                    rgroups = [g['GroupId'] for g in r['Groups']]
            elif r.get('SecurityGroups'):
                if metadata_key and isinstance(r['SecurityGroups'][0], dict):
                    rgroups = [g[metadata_key] for g in r['SecurityGroups']]
                else:
                    rgroups = [g for g in r['SecurityGroups']]
            elif r.get('VpcSecurityGroups'):
                if metadata_key and isinstance(r['VpcSecurityGroups'][0], dict):
                    rgroups = [g[metadata_key] for g in r['VpcSecurityGroups']]
                else:
                    rgroups = [g for g in r['VpcSecurityGroups']]
            # use as substitution for 'Groups' or '[Vpc]SecurityGroups'
            # unsure if necessary - defer to coverage report
            elif metadata_key and r.get(metadata_key):
                rgroups = [g for g in r[metadata_key]]
            # Parse remove_groups
            if remove_target_group_ids == 'matched':
                remove_groups = r.get('c7n:matched-security-groups', ())
            elif remove_target_group_ids == 'all':
                remove_groups = rgroups
            elif isinstance(remove_target_group_ids, list):
                remove_groups = remove_target_group_ids
            elif isinstance(remove_target_group_ids, six.string_types):
                remove_groups = [remove_target_group_ids]
            # Parse add_groups
            if isinstance(add_target_group_ids, list):
                add_groups = add_target_group_ids
            elif isinstance(add_target_group_ids, six.string_types):
                add_groups = [add_target_group_ids]
            for g in remove_groups:
                if g in rgroups:
                    rgroups.remove(g)
            for g in add_groups:
                if g not in rgroups:
                    rgroups.append(g)
            # An interface must keep at least one group; fall back to the
            # configured isolation/quarantine group.
            if not rgroups:
                rgroups.append(isolation_group)
            return_groups.append(rgroups)
        return return_groups
class EventAction(BaseAction):
    """Base for actions whose :meth:`process` also receives the triggering
    lambda event, when one is present.
    """
class LambdaInvoke(EventAction):
    """Invoke an arbitrary lambda.

    Serialized invocation parameters:

    - resources / collection of resources
    - policy / policy that is invoking the lambda
    - action / action that is invoking the lambda
    - event / cloud trail event if any
    - version / version of custodian invoking the lambda

    We automatically batch into sets of 250 for invocation.
    We try to utilize async invocation by default; this imposes
    some greater size limits of 128kb, which means we batch invoke.

    Example::

     - type: invoke-lambda
       function: my-function
    """
    schema = utils.type_schema(
        'invoke-lambda',
        function={'type': 'string'},
        qualifier={'type': 'string'},
        batch_size={'type': 'integer'},
        required=('function',),
        # Fix: 'async' is a reserved keyword on python >= 3.7, so it cannot
        # be passed as a keyword argument literal; splat it in instead.
        **{'async': {'type': 'boolean'}})

    def get_permissions(self):
        if self.data.get('async', True):
            return ('lambda:InvokeAsync',)
        return ('lambda:Invoke',)

    permissions = ('lambda:InvokeFunction',)

    def process(self, resources, event=None):
        """Invoke the configured function once per batch of resources."""
        client = utils.local_session(
            self.manager.session_factory).client('lambda')
        params = dict(FunctionName=self.data['function'])
        if self.data.get('qualifier'):
            # Fix: was self.data['Qualifier'] (wrong case), a guaranteed
            # KeyError whenever a qualifier was configured.
            params['Qualifier'] = self.data['qualifier']
        if self.data.get('async', True):
            params['InvocationType'] = 'Event'
        payload = {
            'version': VERSION,
            'event': event,
            'action': self.data,
            'policy': self.manager.data}
        results = []
        for resource_set in utils.chunks(resources, self.data.get('batch_size', 250)):
            payload['resources'] = resource_set
            params['Payload'] = utils.dumps(payload)
            result = client.invoke(**params)
            result['Payload'] = result['Payload'].read()
            results.append(result)
        return results
class Notify(EventAction):
    """Send policy match data over SQS or SNS for out-of-band delivery.

    Flexible notifications require quite a bit of implementation support
    on pluggable transports, templates, address resolution, variable
    extraction, batch periods, etc.

    For expedience and flexibility then, we instead send the data to
    an sqs queue, for processing. ie. actual communications is DIY atm.

    Example::

      policies:
        - name: ec2-bad-instance-kill
          resource: ec2
          filters:
           - Name: bad-instance
          actions:
           - terminate
           - type: notify
             to:
              - event-user
              - resource-creator
              - email@address
             # which template for the email should we use
             template: policy-template
             transport:
               type: sqs
               region: us-east-1
               queue: xyz
    """
    C7N_DATA_MESSAGE = "maidmsg/1.0"

    schema = {
        'type': 'object',
        'anyOf': [
            {'required': ['type', 'transport', 'to']},
            {'required': ['type', 'transport', 'to_from']}],
        'properties': {
            'type': {'enum': ['notify']},
            'to': {'type': 'array', 'items': {'type': 'string'}},
            'to_from': ValuesFrom.schema,
            'cc': {'type': 'array', 'items': {'type': 'string'}},
            'cc_from': ValuesFrom.schema,
            'cc_manager': {'type': 'boolean'},
            'from': {'type': 'string'},
            'subject': {'type': 'string'},
            'template': {'type': 'string'},
            'transport': {
                'oneOf': [
                    {'type': 'object',
                     'required': ['type', 'queue'],
                     'properties': {
                         'queue': {'type': 'string'},
                         'type': {'enum': ['sqs']}}},
                    {'type': 'object',
                     'required': ['type', 'topic'],
                     'properties': {
                         'topic': {'type': 'string'},
                         'type': {'enum': ['sns']},
                     }}]
            },
            'assume_role': {'type': 'boolean'}
        }
    }

    batch_size = 250

    def __init__(self, data=None, manager=None, log_dir=None):
        super(Notify, self).__init__(data, manager, log_dir)
        # Fix: guard against data=None (the declared default), which
        # previously raised AttributeError on .get().
        self.assume_role = (data or {}).get('assume_role', True)

    def get_permissions(self):
        if self.data.get('transport', {}).get('type') == 'sns':
            return ('sns:Publish',)
        if self.data.get('transport', {'type': 'sqs'}).get('type') == 'sqs':
            return ('sqs:SendMessage',)
        return ()

    def expand_variables(self, message):
        """expand any variables in the action to_from/cc_from fields.
        """
        p = self.data.copy()
        if 'to_from' in self.data:
            to_from = self.data['to_from'].copy()
            to_from['url'] = to_from['url'].format(**message)
            if 'expr' in to_from:
                to_from['expr'] = to_from['expr'].format(**message)
            p.setdefault('to', []).extend(ValuesFrom(to_from, self.manager).get_values())
        if 'cc_from' in self.data:
            cc_from = self.data['cc_from'].copy()
            cc_from['url'] = cc_from['url'].format(**message)
            if 'expr' in cc_from:
                cc_from['expr'] = cc_from['expr'].format(**message)
            p.setdefault('cc', []).extend(ValuesFrom(cc_from, self.manager).get_values())
        return p

    def process(self, resources, event=None):
        """Batch resources and push one message per batch to the transport."""
        alias = utils.get_account_alias_from_sts(
            utils.local_session(self.manager.session_factory))
        message = {
            'event': event,
            'account_id': self.manager.config.account_id,
            'account': alias,
            'region': self.manager.config.region,
            'policy': self.manager.data}
        message['action'] = self.expand_variables(message)
        for batch in utils.chunks(resources, self.batch_size):
            message['resources'] = batch
            receipt = self.send_data_message(message)
            self.log.info("sent message:%s policy:%s template:%s count:%s" % (
                receipt, self.manager.data['name'],
                self.data.get('template', 'default'), len(batch)))

    def send_data_message(self, message):
        """Dispatch *message* to the configured transport (sqs or sns)."""
        if self.data['transport']['type'] == 'sqs':
            return self.send_sqs(message)
        elif self.data['transport']['type'] == 'sns':
            return self.send_sns(message)

    def pack(self, message):
        """Serialize, compress and base64-encode *message* for transport."""
        # Fix: ``base64`` was used here without being imported, so every
        # notify delivery raised NameError (import added at module top).
        dumped = utils.dumps(message)
        compressed = zlib.compress(dumped.encode('utf8'))
        b64encoded = base64.b64encode(compressed)
        return b64encoded.decode('ascii')

    def send_sns(self, message):
        """Publish the packed message to the configured SNS topic."""
        topic = self.data['transport']['topic'].format(**message)
        if topic.startswith('arn:aws:sns'):
            # Fix: was a redundant ``region = region = ...`` assignment.
            region = topic.split(':', 5)[3]
            topic_arn = topic
        else:
            region = message['region']
            topic_arn = "arn:aws:sns:%s:%s:%s" % (
                message['region'], message['account_id'], topic)
        client = self.manager.session_factory(
            region=region, assume=self.assume_role).client('sns')
        client.publish(TopicArn=topic_arn, Message=self.pack(message))

    def send_sqs(self, message):
        """Send the packed message to the configured SQS queue.

        Accepts a queue URL, a queue ARN, or a bare queue name (resolved
        against the policy's account/region).
        """
        queue = self.data['transport']['queue'].format(**message)
        if queue.startswith('https://queue.amazonaws.com'):
            region = 'us-east-1'
            queue_url = queue
        elif queue.startswith('https://sqs.'):
            region = queue.split('.', 2)[1]
            queue_url = queue
        elif queue.startswith('arn:aws:sqs'):
            queue_arn_split = queue.split(':', 5)
            region = queue_arn_split[3]
            owner_id = queue_arn_split[4]
            queue_name = queue_arn_split[5]
            queue_url = "https://sqs.%s.amazonaws.com/%s/%s" % (
                region, owner_id, queue_name)
        else:
            region = self.manager.config.region
            owner_id = self.manager.config.account_id
            queue_name = queue
            queue_url = "https://sqs.%s.amazonaws.com/%s/%s" % (
                region, owner_id, queue_name)
        client = self.manager.session_factory(
            region=region, assume=self.assume_role).client('sqs')
        attrs = {
            'mtype': {
                'DataType': 'String',
                'StringValue': self.C7N_DATA_MESSAGE,
            },
        }
        result = client.send_message(
            QueueUrl=queue_url,
            MessageBody=self.pack(message),
            MessageAttributes=attrs)
        return result['MessageId']
class AutoTagUser(EventAction):
    """Tag a resource with the user who created/modified it.

    .. code-block:: yaml

      policies:
        - name: ec2-auto-tag-ownercontact
          resource: ec2
          description: |
            Triggered when a new EC2 Instance is launched. Checks to see if
            it's missing the OwnerContact tag. If missing it gets created
            with the value of the ID of whomever called the RunInstances API
          mode:
            type: cloudtrail
            role: arn:aws:iam::123456789000:role/custodian-auto-tagger
            events:
              - RunInstances
          filters:
           - tag:OwnerContact: absent
          actions:
           - type: auto-tag-user
             tag: OwnerContact

    There's a number of caveats to usage. Resources which don't
    include tagging as part of their api may have some delay before
    automation kicks in to create a tag. Real world delay may be several
    minutes, with worst case into hours[0]. This creates a race condition
    between auto tagging and automation.

    In practice this window is on the order of a fraction of a second, as
    we fetch the resource and evaluate the presence of the tag before
    attempting to tag it.

    References
     - AWS Config (see REQUIRED_TAGS caveat) - http://goo.gl/oDUXPY
     - CloudTrail User - http://goo.gl/XQhIG6
    """
    schema = utils.type_schema(
        'auto-tag-user',
        required=['tag'],
        **{'user-type': {
            'type': 'array',
            'items': {'type': 'string',
                      'enum': [
                          'IAMUser',
                          'AssumedRole',
                          'FederatedUser'
                      ]}},
           'update': {'type': 'boolean'},
           'tag': {'type': 'string'},
           'principal_id_tag': {'type': 'string'}
           }
    )

    def get_permissions(self):
        # Delegates to the resource's own tag action for permissions.
        return self.manager.action_registry.get(
            'tag')({}, self.manager).get_permissions()

    def validate(self):
        # Only usable in cloudtrail mode (needs the API-call event) and on
        # resource types that register a 'tag' action.
        if self.manager.data.get('mode', {}).get('type') != 'cloudtrail':
            raise ValueError("Auto tag owner requires an event")
        if self.manager.action_registry.get('tag') is None:
            raise ValueError("Resource does not support tagging")
        return self

    def process(self, resources, event):
        """Extract the calling identity from *event* and tag *resources*."""
        if event is None:
            return
        event = event['detail']
        utype = event['userIdentity']['type']
        if utype not in self.data.get('user-type', ['AssumedRole', 'IAMUser']):
            return
        user = None
        if utype == "IAMUser":
            user = event['userIdentity']['userName']
            principal_id_value = event['userIdentity'].get('principalId', '')
        elif utype == "AssumedRole":
            # The role session name is the trailing ARN segment.
            user = event['userIdentity']['arn']
            prefix, user = user.rsplit('/', 1)
            principal_id_value = event['userIdentity'].get('principalId', '').split(':')[0]
            # instance role
            if user.startswith('i-'):
                return
            # lambda function (old style)
            elif user.startswith('awslambda'):
                return
        if user is None:
            return
        # if the auto-tag-user policy set update to False (or it's unset) then we
        # will skip writing their UserName tag and not overwrite pre-existing values
        if not self.data.get('update', False):
            untagged_resources = []
            # iterating over all the resources the user spun up in this event
            for resource in resources:
                tag_already_set = False
                for tag in resource.get('Tags', ()):
                    if tag['Key'] == self.data['tag']:
                        tag_already_set = True
                        break
                if not tag_already_set:
                    untagged_resources.append(resource)
        # if update is set to True, we will overwrite the userName tag even if
        # the user already set a value
        else:
            untagged_resources = resources
        tag_action = self.manager.action_registry.get('tag')
        new_tags = {
            self.data['tag']: user
        }
        # if principal_id_key is set (and value), we'll set the principalId tag.
        principal_id_key = self.data.get('principal_id_tag', None)
        if principal_id_key and principal_id_value:
            new_tags[principal_id_key] = principal_id_value
        for key, value in six.iteritems(new_tags):
            tag_action({'key': key, 'value': value}, self.manager).process(untagged_resources)
        return new_tags
def add_auto_tag_user(registry, _):
    """Register the ``auto-tag-user`` action on every taggable resource type."""
    for resource_name in registry.keys():
        resource_class = registry.get(resource_name)
        supports_tagging = resource_class.action_registry.get('tag')
        already_registered = resource_class.action_registry.get('auto-tag-user')
        if supports_tagging and not already_registered:
            resource_class.action_registry.register('auto-tag-user', AutoTagUser)
# Defer registration until all resource types have loaded, so every taggable
# resource type picks up the auto-tag-user action.
resources.subscribe(resources.EVENT_FINAL, add_auto_tag_user)
class PutMetric(BaseAction):
    """Action to put metrics based on an expression into CloudWatch metrics

    :example:

    .. code-block: yaml

        policies:
          - name: track-attached-ebs
            resource: ec2
            comment: |
              Put the count of the number of EBS attached disks to an instance
            filters:
              - Name: tracked-ec2-instance
            actions:
              - type: put-metric
                key: Reservations[].Instances[].BlockDeviceMappings[].DeviceName
                namespace: Usage Metrics
                metric_name: Attached Disks
                op: count
                units: Files

    op and units are optional and will default to simple Counts.
    """
    # permissions are typically lowercase servicename:TitleCaseActionName
    permissions = {'cloudwatch:PutMetricData', }

    schema = {
        'type': 'object',
        'required': ['type', 'key', 'namespace', 'metric_name'],
        'properties': {
            'type': {'enum': ['put-metric', ]},
            'key': {'type': 'string'},  # jmes path
            'namespace': {'type': 'string'},
            'metric_name': {'type': 'string'},
            'dimensions': {
                'type': 'array',
                'items': {'type': 'object'},
            },
            'op': {'enum': list(METRIC_OPS.keys())},
            'units': {'enum': METRIC_UNITS}
        }
    }

    def process(self, resources):
        """Evaluate the key expression over *resources*, reduce with the
        configured op, and publish the value to CloudWatch."""
        ns = self.data['namespace']
        metric_name = self.data['metric_name']
        key_expression = self.data.get('key', 'Resources[]')
        operation = self.data.get('op', 'count')
        units = self.data.get('units', 'Count')
        # dimensions are passed as a list of dicts
        dimensions = self.data.get('dimensions', [])
        now = datetime.utcnow()
        # reduce the resources by the key expression, and apply the
        # operation to derive the value
        values = []
        self.log.debug("searching for %s in %s", key_expression, resources)
        try:
            # Resources are wrapped in a dict so jmespath expressions in the
            # yaml files need not start with '[]', which fails to parse.
            values = jmespath.search("Resources[]." + key_expression,
                                     {'Resources': resources})
        except TypeError as oops:
            # Fix: Exception.message does not exist on python 3; log the
            # exception itself instead.
            self.log.error("%s", oops)
        value = 0
        try:
            f = METRIC_OPS[operation]
            value = f(values)
        except KeyError:
            self.log.error("Bad op for put-metric action: %s", operation)
        metrics_data = [
            {
                'MetricName': metric_name,
                'Dimensions': [{'Name': i[0], 'Value': i[1]}
                               for d in dimensions
                               for i in d.items()],
                'Timestamp': now,
                'Value': value,
                # TODO: support an operation of 'stats' to include this
                # structure instead of a single Value
                # Value and StatisticValues are mutually exclusive.
                # 'StatisticValues': {
                #     'SampleCount': 1,
                #     'Sum': 123.0,
                #     'Minimum': 123.0,
                #     'Maximum': 123.0
                # },
                'Unit': units,
            },
        ]
        client = self.manager.session_factory().client('cloudwatch')
        client.put_metric_data(Namespace=ns, MetricData=metrics_data)
        return resources
class RemovePolicyBase(BaseAction):
    """Shared base for actions that strip statements from an IAM policy doc."""

    schema = utils.type_schema(
        'remove-statements',
        required=['statement_ids'],
        statement_ids={'oneOf': [
            {'enum': ['matched']},
            {'type': 'array', 'items': {'type': 'string'}}]})

    def process_policy(self, policy, resource, matched_key):
        """Remove the configured statements from *policy* in place.

        Returns ``(statements, removed)`` where ``statements`` is None when
        nothing matched, and ``removed`` lists the statements taken out.
        """
        statement_ids = self.data.get('statement_ids')
        statements = policy.get('Statement', [])
        matched_statements = resource.get(matched_key, ())
        removed = []
        for statement in tuple(statements):
            if statement_ids == 'matched':
                hit = statement in matched_statements
            else:
                hit = statement['Sid'] in self.data['statement_ids']
            if hit:
                removed.append(statement)
                statements.remove(statement)
        if not removed:
            return None, removed
        return statements, removed
| |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
try:
import json
except ImportError:
import simplejson as json
import logging
import os
import debtcollector.renames
from keystoneauth1 import access
from keystoneauth1 import adapter
from oslo_utils import importutils
import requests
from neutronclient._i18n import _
from neutronclient.common import exceptions
from neutronclient.common import utils
# osprofiler is an optional dependency; None when not installed.
osprofiler_web = importutils.try_import("osprofiler.web")
_logger = logging.getLogger(__name__)
# Opt-in debug logging (including the 'requests' library) via env var.
if os.environ.get('NEUTRONCLIENT_DEBUG'):
    ch = logging.StreamHandler()
    _logger.setLevel(logging.DEBUG)
    _logger.addHandler(ch)
    _requests_log_level = logging.DEBUG
else:
    _requests_log_level = logging.WARNING
logging.getLogger("requests").setLevel(_requests_log_level)
# Longest request URI the client will send before raising RequestURITooLong.
MAX_URI_LEN = 8192
USER_AGENT = 'python-neutronclient'
REQ_ID_HEADER = 'X-OpenStack-Request-ID'
class HTTPClient(object):
    """Handles the REST calls and responses, include authn."""

    CONTENT_TYPE = 'application/json'

    @debtcollector.renames.renamed_kwarg(
        'tenant_id', 'project_id', replace=True)
    @debtcollector.renames.renamed_kwarg(
        'tenant_name', 'project_name', replace=True)
    def __init__(self, username=None, user_id=None,
                 project_name=None, project_id=None,
                 password=None, auth_url=None,
                 token=None, region_name=None, timeout=None,
                 endpoint_url=None, insecure=False,
                 endpoint_type='publicURL',
                 auth_strategy='keystone', ca_cert=None, log_credentials=False,
                 service_type='network', global_request_id=None,
                 **kwargs):
        self.username = username
        self.user_id = user_id
        self.project_name = project_name
        self.project_id = project_id
        self.password = password
        self.auth_url = auth_url.rstrip('/') if auth_url else None
        self.service_type = service_type
        self.endpoint_type = endpoint_type
        self.region_name = region_name
        self.timeout = timeout
        self.auth_token = token
        self.auth_tenant_id = None
        self.auth_user_id = None
        self.endpoint_url = endpoint_url
        self.auth_strategy = auth_strategy
        self.log_credentials = log_credentials
        self.global_request_id = global_request_id
        # verify_cert is passed to requests' ``verify``: False disables TLS
        # verification, a path points at a CA bundle, True uses defaults.
        if insecure:
            self.verify_cert = False
        else:
            self.verify_cert = ca_cert if ca_cert else True

    def _cs_request(self, *args, **kwargs):
        """Issue one request, logging it and translating transport errors.

        Raises SslCertificateValidationError, ConnectionFailed or
        Unauthorized (on HTTP 401); otherwise returns (response, body).
        """
        kargs = {}
        kargs.setdefault('headers', kwargs.get('headers', {}))
        kargs['headers']['User-Agent'] = USER_AGENT
        if 'body' in kwargs:
            kargs['body'] = kwargs['body']
        # Redact the password from logged request bodies unless the caller
        # explicitly opted into credential logging.
        if self.log_credentials:
            log_kargs = kargs
        else:
            log_kargs = self._strip_credentials(kargs)
        utils.http_log_req(_logger, args, log_kargs)
        try:
            resp, body = self.request(*args, **kargs)
        except requests.exceptions.SSLError as e:
            raise exceptions.SslCertificateValidationError(reason=e)
        except Exception as e:
            # Wrap the low-level connection error (socket timeout, redirect
            # limit, decompression error, etc) into our custom high-level
            # connection exception (it is excepted in the upper layers of code)
            _logger.debug("throwing ConnectionFailed : %s", e)
            raise exceptions.ConnectionFailed(reason=e)
        utils.http_log_resp(_logger, resp, body)
        # log request-id for each api call
        request_id = resp.headers.get('x-openstack-request-id')
        if request_id:
            _logger.debug('%(method)s call to neutron for '
                          '%(url)s used request id '
                          '%(response_request_id)s',
                          {'method': resp.request.method,
                           'url': resp.url,
                           'response_request_id': request_id})
        if resp.status_code == 401:
            raise exceptions.Unauthorized(message=body)
        return resp, body

    def _strip_credentials(self, kwargs):
        """Return a copy of *kwargs* with the password redacted from 'body'."""
        if kwargs.get('body') and self.password:
            log_kwargs = kwargs.copy()
            log_kwargs['body'] = kwargs['body'].replace(self.password,
                                                        'REDACTED')
            return log_kwargs
        else:
            return kwargs

    def authenticate_and_fetch_endpoint_url(self):
        """Ensure an auth token and a service endpoint URL are available."""
        if not self.auth_token:
            self.authenticate()
        elif not self.endpoint_url:
            self.endpoint_url = self._get_endpoint_url()

    def request(self, url, method, body=None, headers=None, **kwargs):
        """Request without authentication."""
        content_type = kwargs.pop('content_type', None) or 'application/json'
        headers = headers or {}
        headers.setdefault('Accept', content_type)
        if body:
            headers.setdefault('Content-Type', content_type)
        if self.global_request_id:
            headers.setdefault(REQ_ID_HEADER, self.global_request_id)
        headers['User-Agent'] = USER_AGENT
        # NOTE(dbelova): osprofiler_web.get_trace_id_headers does not add any
        # headers in case if osprofiler is not initialized.
        if osprofiler_web:
            headers.update(osprofiler_web.get_trace_id_headers())
        resp = requests.request(
            method,
            url,
            data=body,
            headers=headers,
            verify=self.verify_cert,
            timeout=self.timeout,
            **kwargs)
        return resp, resp.text

    def _check_uri_length(self, action):
        """Raise RequestURITooLong when endpoint + action exceeds MAX_URI_LEN."""
        uri_len = len(self.endpoint_url) + len(action)
        if uri_len > MAX_URI_LEN:
            raise exceptions.RequestURITooLong(
                excess=uri_len - MAX_URI_LEN)

    def do_request(self, url, method, **kwargs):
        """Authenticated request against the service endpoint.

        Retries once after re-authenticating if the first attempt returns
        Unauthorized (e.g. an expired token).
        """
        # Ensure client always has correct uri - do not guesstimate anything
        self.authenticate_and_fetch_endpoint_url()
        self._check_uri_length(url)
        # Perform the request once. If we get a 401 back then it
        # might be because the auth token expired, so try to
        # re-authenticate and try again. If it still fails, bail.
        try:
            kwargs['headers'] = kwargs.get('headers') or {}
            if self.auth_token is None:
                self.auth_token = ""
            kwargs['headers']['X-Auth-Token'] = self.auth_token
            resp, body = self._cs_request(self.endpoint_url + url, method,
                                          **kwargs)
            return resp, body
        except exceptions.Unauthorized:
            self.authenticate()
            kwargs['headers'] = kwargs.get('headers') or {}
            kwargs['headers']['X-Auth-Token'] = self.auth_token
            resp, body = self._cs_request(
                self.endpoint_url + url, method, **kwargs)
            return resp, body

    def _extract_service_catalog(self, body):
        """Set the client's service catalog from the response data."""
        self.auth_ref = access.create(body=body)
        self.service_catalog = self.auth_ref.service_catalog
        self.auth_token = self.auth_ref.auth_token
        self.auth_tenant_id = self.auth_ref.tenant_id
        self.auth_user_id = self.auth_ref.user_id
        if not self.endpoint_url:
            self.endpoint_url = self.service_catalog.url_for(
                region_name=self.region_name,
                service_type=self.service_type,
                interface=self.endpoint_type)

    def _authenticate_keystone(self):
        """POST credentials to keystone /tokens and store the resulting
        token and service catalog."""
        if self.user_id:
            creds = {'userId': self.user_id,
                     'password': self.password}
        else:
            creds = {'username': self.username,
                     'password': self.password}
        if self.project_id:
            body = {'auth': {'passwordCredentials': creds,
                             'tenantId': self.project_id, }, }
        else:
            body = {'auth': {'passwordCredentials': creds,
                             'tenantName': self.project_name, }, }
        if self.auth_url is None:
            raise exceptions.NoAuthURLProvided()
        token_url = self.auth_url + "/tokens"
        resp, resp_body = self._cs_request(token_url, "POST",
                                           body=json.dumps(body),
                                           content_type="application/json",
                                           allow_redirects=True)
        if resp.status_code != 200:
            raise exceptions.Unauthorized(message=resp_body)
        if resp_body:
            try:
                resp_body = json.loads(resp_body)
            except ValueError:
                pass
        else:
            resp_body = None
        self._extract_service_catalog(resp_body)

    def _authenticate_noauth(self):
        """With auth disabled, only validate that an endpoint was supplied."""
        if not self.endpoint_url:
            message = _('For "noauth" authentication strategy, the endpoint '
                        'must be specified either in the constructor or '
                        'using --os-url')
            raise exceptions.Unauthorized(message=message)

    def authenticate(self):
        """Authenticate per the configured strategy ('keystone' or 'noauth')."""
        if self.auth_strategy == 'keystone':
            self._authenticate_keystone()
        elif self.auth_strategy == 'noauth':
            self._authenticate_noauth()
        else:
            err_msg = _('Unknown auth strategy: %s') % self.auth_strategy
            raise exceptions.Unauthorized(message=err_msg)

    def _get_endpoint_url(self):
        """Resolve the network endpoint for the current token and region."""
        if self.auth_url is None:
            raise exceptions.NoAuthURLProvided()
        url = self.auth_url + '/tokens/%s/endpoints' % self.auth_token
        try:
            resp, body = self._cs_request(url, "GET")
        except exceptions.Unauthorized:
            # rollback to authenticate() to handle case when neutron client
            # is initialized just before the token is expired
            self.authenticate()
            return self.endpoint_url
        body = json.loads(body)
        for endpoint in body.get('endpoints', []):
            if (endpoint['type'] == 'network' and
                    endpoint.get('region') == self.region_name):
                if self.endpoint_type not in endpoint:
                    raise exceptions.EndpointTypeNotFound(
                        type_=self.endpoint_type)
                return endpoint[self.endpoint_type]
        raise exceptions.EndpointNotFound()

    def get_auth_info(self):
        """Return the current token, tenant/user ids and endpoint URL."""
        return {'auth_token': self.auth_token,
                'auth_tenant_id': self.auth_tenant_id,
                'auth_user_id': self.auth_user_id,
                'endpoint_url': self.endpoint_url}

    def get_auth_ref(self):
        """Return the keystoneauth access info, or None before authentication."""
        return getattr(self, 'auth_ref', None)
class SessionClient(adapter.Adapter):
    """keystoneauth session-based client; the session handles auth/retries."""

    def request(self, *args, **kwargs):
        """Issue a request through the keystoneauth adapter.

        Returns (response, response_text); HTTP errors are not raised here
        (raise_exc defaults to False).
        """
        kwargs.setdefault('authenticated', False)
        kwargs.setdefault('raise_exc', False)
        content_type = kwargs.pop('content_type', None) or 'application/json'
        headers = kwargs.get('headers') or {}
        headers.setdefault('Accept', content_type)
        # NOTE(dbelova): osprofiler_web.get_trace_id_headers does not add any
        # headers in case if osprofiler is not initialized.
        if osprofiler_web:
            headers.update(osprofiler_web.get_trace_id_headers())
        # Accept 'body' as an alias for requests' 'data' kwarg.
        try:
            kwargs.setdefault('data', kwargs.pop('body'))
        except KeyError:
            pass
        if kwargs.get('data'):
            headers.setdefault('Content-Type', content_type)
        kwargs['headers'] = headers
        resp = super(SessionClient, self).request(*args, **kwargs)
        return resp, resp.text

    def _check_uri_length(self, url):
        """Raise RequestURITooLong when endpoint + url exceeds MAX_URI_LEN."""
        uri_len = len(self.endpoint_url) + len(url)
        if uri_len > MAX_URI_LEN:
            raise exceptions.RequestURITooLong(
                excess=uri_len - MAX_URI_LEN)

    def do_request(self, url, method, **kwargs):
        """Authenticated request with URI-length validation."""
        kwargs.setdefault('authenticated', True)
        self._check_uri_length(url)
        return self.request(url, method, **kwargs)

    @property
    def endpoint_url(self):
        # NOTE(jamielennox): This is used purely by the CLI and should be
        # removed when the CLI gets smarter.
        return self.get_endpoint()

    @property
    def auth_token(self):
        # NOTE(jamielennox): This is used purely by the CLI and should be
        # removed when the CLI gets smarter.
        return self.get_token()

    def authenticate(self):
        # NOTE(jamielennox): This is used purely by the CLI and should be
        # removed when the CLI gets smarter.
        self.get_token()

    def get_auth_info(self):
        """Return token/endpoint info, plus tenant and user ids when the
        session's auth plugin exposes get_access."""
        auth_info = {'auth_token': self.auth_token,
                     'endpoint_url': self.endpoint_url}
        # NOTE(jamielennox): This is the best we can do here. It will work
        # with identity plugins which is the primary case but we should
        # deprecate it's usage as much as possible.
        try:
            get_access = (self.auth or self.session.auth).get_access
        except AttributeError:
            pass
        else:
            auth_ref = get_access(self.session)
            auth_info['auth_tenant_id'] = auth_ref.project_id
            auth_info['auth_user_id'] = auth_ref.user_id
        return auth_info

    def get_auth_ref(self):
        """Return the session auth plugin's access info."""
        return self.session.auth.get_auth_ref(self.session)
# FIXME(bklei): Should refactor this to use kwargs and only
# explicitly list arguments that are not None.
@debtcollector.renames.renamed_kwarg('tenant_id', 'project_id', replace=True)
@debtcollector.renames.renamed_kwarg(
    'tenant_name', 'project_name', replace=True)
def construct_http_client(username=None,
                          user_id=None,
                          project_name=None,
                          project_id=None,
                          password=None,
                          auth_url=None,
                          token=None,
                          region_name=None,
                          timeout=None,
                          endpoint_url=None,
                          insecure=False,
                          endpoint_type='public',
                          log_credentials=None,
                          auth_strategy='keystone',
                          ca_cert=None,
                          service_type='network',
                          session=None,
                          global_request_id=None,
                          **kwargs):
    """Build a SessionClient when a keystoneauth *session* is given,
    otherwise a legacy HTTPClient from the individual credentials."""
    if session:
        kwargs.setdefault('user_agent', USER_AGENT)
        kwargs.setdefault('interface', endpoint_type)
        return SessionClient(session=session,
                             service_type=service_type,
                             region_name=region_name,
                             global_request_id=global_request_id,
                             **kwargs)
    else:
        # FIXME(bklei): username and password are now optional. Need
        # to test that they were provided in this mode. Should also
        # refactor to use kwargs.
        return HTTPClient(username=username,
                          password=password,
                          project_id=project_id,
                          project_name=project_name,
                          user_id=user_id,
                          auth_url=auth_url,
                          token=token,
                          endpoint_url=endpoint_url,
                          insecure=insecure,
                          timeout=timeout,
                          region_name=region_name,
                          endpoint_type=endpoint_type,
                          service_type=service_type,
                          ca_cert=ca_cert,
                          log_credentials=log_credentials,
                          auth_strategy=auth_strategy,
                          global_request_id=global_request_id)
| |
#!/usr/bin/env python
# coding=utf-8
import time, datetime, logging, threading, sys, traceback, hashlib
from suit import dbpc
from . import CFG
from ..util import rectify, transfer
from .. import Field
# Sort-direction lookup: maps mongo-style 1/-1 sort markers to SQL
# 'asc'/'desc' keywords.
ORDER = {1:'asc', -1:'desc'}
class IdField(Field):
    """Auto-increment integer primary-key column.

    Always renders as ``int(11) not null auto_increment``; defaults to 0
    unless *strict* or an explicit default is given.
    """

    def __init__(self, strict=False, **attributes):
        if not strict and 'default' not in attributes:
            attributes['default'] = 0
        attributes['ddl'] = '%s(%d) not null auto_increment' % ('int', 11)
        attributes['pyt'] = int
        super(IdField, self).__init__(**attributes)

    @classmethod
    def verify(cls, val):
        """Coerce *val* to an int."""
        return int(val)
class PassField(Field):
    """Password column stored as an md5 hex digest (32-character string).

    The non-strict default is the digest of '123456'; ``verify`` hashes a
    raw password the same way so stored and incoming values compare equal.

    BUG FIX: the original assigned the undefined name ``ObjectId`` to
    ``ddl``/``pyt`` (copy/paste from a mongo model, which made every
    instantiation raise NameError) and called ``super(IdField, ...)``
    instead of ``super(PassField, ...)``.
    """
    def __init__(self, strict=False, **attributes):
        if not strict and not 'default' in attributes:
            m = hashlib.md5()
            # encode() keeps this working on both python 2 and 3
            m.update('123456'.encode('utf-8'))
            attributes['default'] = m.hexdigest()
        # An md5 hex digest is always exactly 32 characters.
        # NOTE(review): char(32) is the presumed intended schema for this
        # MySQL-backed ORM — confirm against the deployed table definitions.
        attributes['ddl'] = '%s(%d)' % ('char', 32)
        attributes['pyt'] = str
        super(PassField, self).__init__(**attributes)
    @classmethod
    def verify(cls, val):
        m = hashlib.md5()
        if isinstance(val, str):
            val = val.encode('utf-8')
        m.update(val)
        return m.hexdigest()
class StrField(Field):
    """Variable-length string column (``varchar`` unless a ddl is given)."""
    def __init__(self, strict=False, **attributes):
        if not strict and 'default' not in attributes:
            attributes['default'] = ''
        base = attributes.get('ddl', 'varchar')
        width = attributes.get('max_length', 255)
        attributes['ddl'] = '%s(%d)' % (base, width)
        attributes['pyt'] = str
        super(StrField, self).__init__(**attributes)
class IntField(Field):
    """Integer column (``int`` unless a ddl is given, width ``max_length``)."""
    def __init__(self, strict=False, **attributes):
        if not strict and 'default' not in attributes:
            attributes['default'] = 0
        base = attributes.get('ddl', 'int')
        width = attributes.get('max_length', 11)
        attributes['ddl'] = '%s(%d)' % (base, width)
        attributes['pyt'] = int
        super(IntField, self).__init__(**attributes)
class FloatField(Field):
    """Floating-point column; plain ``float`` unless a custom ddl is given."""
    def __init__(self, strict=False, **attributes):
        if not strict and 'default' not in attributes:
            attributes['default'] = 0.0
        # A missing ddl, or an explicit 'float', yields a bare float column;
        # any other ddl keeps its name and gains a width suffix.
        if attributes.get('ddl', 'float') == 'float':
            attributes['ddl'] = 'float'
        else:
            attributes['ddl'] = '%s(%d)' % (attributes['ddl'], attributes.get('max_length', 11))
        attributes['pyt'] = float
        super(FloatField, self).__init__(**attributes)
class BoolField(Field):
    """Boolean column (``bool``)."""
    def __init__(self, strict=False, **attributes):
        if not strict and 'default' not in attributes:
            attributes['default'] = False
        attributes.setdefault('ddl', 'bool')
        attributes['pyt'] = bool
        super(BoolField, self).__init__(**attributes)
class TextField(Field):
    """Long-text column (``text``)."""
    def __init__(self, strict=False, **attributes):
        if not strict and 'default' not in attributes:
            attributes['default'] = ''
        attributes.setdefault('ddl', 'text')
        attributes['pyt'] = str
        super(TextField, self).__init__(**attributes)
class DatetimeField(Field):
    # Datetime column; optionally a self-updating MySQL ``timestamp``.
    def __init__(self, strict=False, **attributes):
        if not strict and not 'default' in attributes:
            # NOTE(review): this default is evaluated once, when the field
            # object is constructed (typically at model-class definition
            # time), not per inserted row — confirm that is intended.
            attributes['default'] = datetime.datetime.now()
        if not 'ddl' in attributes:
            attributes['ddl'] = 'datetime'
        if attributes['ddl'] == 'timestamp':
            # NOTE(review): assigns the instance attribute directly, *before*
            # super().__init__ runs; if Field.__init__ also sets self.default
            # from `attributes`, this value may be overwritten — verify
            # against the Field base class.
            self.default = 'current_timestamp on update current_timestamp'
        attributes['pyt'] = datetime.datetime
        super(DatetimeField, self).__init__(**attributes)
class ListField(Field):
    """List-valued column (ddl tag ``list``)."""
    def __init__(self, strict=False, **attributes):
        if not strict and 'default' not in attributes:
            # NOTE: the [] default object is shared, mirroring the original.
            attributes['default'] = []
        attributes.setdefault('ddl', 'list')
        attributes['pyt'] = list
        super(ListField, self).__init__(**attributes)
class DictField(Field):
    """Dict-valued column (ddl tag ``dict``)."""
    def __init__(self, strict=False, **attributes):
        if not strict and 'default' not in attributes:
            # NOTE: the {} default object is shared, mirroring the original.
            attributes['default'] = {}
        attributes.setdefault('ddl', 'dict')
        attributes['pyt'] = dict
        super(DictField, self).__init__(**attributes)
_triggers = frozenset(['pre_insert', 'pre_update', 'pre_delete'])
def genDoc(cls):
    """Render a ``create table if not exists`` DDL statement for *cls*.

    Walks ``cls.__mappings__`` in field-declaration order and emits one
    column line per field, then the primary-key clause and any unique-key
    constraints collected from ``Field.unique`` markers.

    Raises:
        StandardError: if a field carries no ``ddl`` attribute.
    """
    tablename = cls.__table__
    tablefields = cls.__mappings__
    uniques = {}
    doc = ['-- generating DOC for %s:' % tablename, 'create table if not exists `%s` (' % tablename]
    # key= works on python 2 and 3 alike (the original used a python-2-only
    # cmp-style comparator); the ordering is identical.
    for f in sorted(tablefields.values(), key=lambda x: x.order):
        if not hasattr(f, 'ddl'):
            # BUG FIX: the original referenced the undefined name `n` here,
            # so the intended error message was never produced.
            raise StandardError('no ddl in field "%s".' % f.name)
        ddl = f.ddl
        nullable = f.nullable
        if f.unique:
            # Group columns that share a unique-key name.
            if f.unique in uniques:
                uniques[f.unique].append(f.name)
            else:
                uniques[f.unique] = [f.name]
        doc.append(nullable and ' `%s` %s,' % (f.name, ddl) or ' `%s` %s not null default %s,' % (f.name, ddl, f.default))
    # BUG FIX: the original wrote `'...' % 'id' if cond else cls.id_name`,
    # which binds the conditional to the *whole* expression — a custom
    # primary-key name emitted the bare column name with no
    # "primary key (...)" wrapper.
    pk_name = 'id' if cls.id_name == '_id' else cls.id_name
    if uniques:
        doc.append(' primary key (`%s`),' % pk_name)
        doc.append(',\n'.join(' unique key `%s` (%s)' % (key, ','.join('`'+one+'`' for one in val)) for key, val in uniques.items()))
    else:
        doc.append(' primary key (`%s`)' % pk_name)
    doc.append(');')
    return '\n'.join(doc)
class ModelMetaclass(type):
    '''
    Metaclass for model objects.

    Collects Field instances declared on a Model subclass into
    ``__mappings__``, strips them from the class namespace, and ensures a
    primary-key field exists (auto-adding an IdField when none is marked
    ``primary``).
    '''
    def __new__(cls, name, bases, attrs):
        # Merge inherited field declarations; the subclass's own attrs win.
        for b in bases:
            attrs = dict(getattr(b, '__mappings__', {}), **attrs)
        # skip base Model class:
        if name=='Model':
            return type.__new__(cls, name, bases, attrs)
        # store all subclasses info:
        if not hasattr(cls, 'subclasses'):
            cls.subclasses = {}
        if not name in cls.subclasses:
            # NOTE(review): stores the class *name* as the value rather than
            # the class object — confirm whether callers expect the class.
            cls.subclasses[name] = name
        else:
            pass
        mappings = dict()
        search = {}
        has_id = False
        # NOTE(review): id_name lives on the metaclass, so it is shared
        # mutable state across all model classes being constructed; the most
        # recently created model wins. It is re-initialised on each call.
        cls.id_name = '_id'
        # python-2 dict iteration (iteritems/iterkeys below)
        for k, v in attrs.iteritems():
            if isinstance(v, Field):
                if not v.name:
                    v.name = k
                mappings[k] = v
                if v.searchable:
                    search[k] = v.searchable
                if v.primary:
                    has_id = True
                    cls.id_name = v.name
        if not has_id:
            # No explicit primary key: synthesise an auto-increment id field.
            attrs[cls.id_name] = IdField(primary=True)
            attrs[cls.id_name].name = cls.id_name
            mappings[cls.id_name] = attrs[cls.id_name]
        # Remove the Field objects from the class namespace so attribute
        # access goes through Model's dict protocol instead.
        for k in mappings.iterkeys():
            attrs.pop(k)
        attrs['__mappings__'] = mappings
        attrs['__search__'] = search
        attrs['_insertsql'] = []
        attrs['_insertdatas'] = []
        cls.genDoc = lambda cls:genDoc(cls)
        return type.__new__(cls, name, bases, attrs)
class Model(dict):
    """Base class for table-backed models.

    A model instance is a dict whose keys are column names (the metaclass
    moves Field declarations into ``__mappings__``).  The query helpers
    accept mongo-style ``spec``/``projection``/``sort`` arguments, map the
    external ``_id`` name onto the SQL ``id`` column, and execute through
    the shared ``dbpc`` connection handler.
    """
    __table__ = None
    __metaclass__ = ModelMetaclass
    # Lazily-created class-level lock guarding the shared buffered-insert
    # state (_insertsql/_insertdatas) used by insert(method != 'SINGLE').
    __lock = None
    def __init__(self, **attributes):
        super(Model, self).__init__(**attributes)
    def __getattr__(self, key):
        # Attribute reads fall through to the underlying dict.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
    def __setattr__(self, key, value):
        # Attribute writes store into the dict, not the instance __dict__.
        self[key] = value
    def __setstate__(self, state):
        # Pickle support: (de)serialise via the instance __dict__.
        self.__dict__ = state
    def __getstate__(self):
        return self.__dict__
    @classmethod
    def queryOne(cls, spec, projection={}, sort=[], update=False):
        '''
        Find by where clause and return one result. If multiple results found,
        only the first one returned. If no result found, return None.

        spec: mongo-style filter dict, rewritten by rectify()/transfer().
        projection: {column: 1} of columns to fetch; '_id' is always added
            and aliased to the SQL 'id' column.
        sort: list of (column, direction) pairs, direction 1/-1 (see ORDER).
        update: when truthy, appends 'for update' to lock the row.

        NOTE(review): the {} / [] defaults are mutable default arguments,
        and caller-supplied projection dicts are mutated in place.
        '''
        keys = []
        args = []
        rectify(cls, '_id', spec)
        where = transfer(spec, grand=None, parent='', index=keys, condition=args)
        if projection:
            projection['_id'] = projection.get('_id', 1)
            projection = ['id' if k == '_id' else k for k, v in projection.items() if v == 1]
        else:
            projection = ['id' if k == '_id' else k for k in cls.__mappings__.keys()]
        projection = ','.join(['`%s` as _id' % c if c == 'id' else '`%s`' % c for c in projection])
        if sort:
            sort = 'order by ' + ','.join(['%s %s' % (one[0], ORDER.get(one[-1], 'asc')) for one in sort])
        else:
            sort = ''
        if update:
            update = 'for update'
        else:
            update = ''
        if where:
            where = 'where %s' % where
        # limit 0, 1 restricts the result to the first matching row.
        d = dbpc.handler.queryOne('select %s from `%s` %s %s limit %d, %d %s' % (projection, cls.__table__, where, sort, 0, 1, update), [args[index][one] for index, one in enumerate(keys)])
        return d
    @classmethod
    def queryAll(cls, spec, projection={}, sort=[], skip=0, limit=None, update=False):
        '''
        Find all and return list.

        Same arguments as queryOne, plus skip/limit paging; limit=None
        fetches every matching row.
        '''
        keys = []
        args = []
        rectify(cls, '_id', spec)
        where = transfer(spec, grand=None, parent='', index=keys, condition=args)
        if projection:
            projection['_id'] = projection.get('_id', 1)
            projection = ['id' if k == '_id' else k for k, v in projection.items() if v == 1]
        else:
            projection = ['id' if k == '_id' else k for k in cls.__mappings__.keys()]
        projection = ','.join(['`%s` as _id' % c if c == 'id' else '`%s`' % c for c in projection])
        if sort:
            sort = 'order by ' + ','.join(['%s %s' % (one[0], ORDER.get(one[-1], 'asc')) for one in sort])
        else:
            sort = ''
        if update:
            update = 'for update'
        else:
            update = ''
        if where:
            where = 'where %s' % where
        if limit is None:
            L = dbpc.handler.queryAll('select %s from `%s` %s %s %s' % (projection, cls.__table__, where, sort, update), [args[index][one] for index, one in enumerate(keys)])
        else:
            L = dbpc.handler.queryAll('select %s from `%s` %s %s limit %d, %d %s' % (projection, cls.__table__, where, sort, skip, limit, update), [args[index][one] for index, one in enumerate(keys)])
        return L
    @classmethod
    def count(cls, spec):
        '''
        Find by 'select count(pk) from table where ... ' and return int.
        '''
        keys = []
        args = []
        rectify(cls, '_id', spec)
        where = transfer(spec, grand=None, parent='', index=keys, condition=args)
        if where:
            where = 'where %s' % where
        return dbpc.handler.queryOne('select count(*) as total from `%s` %s' % (cls.__table__, where), [args[index][one] for index, one in enumerate(keys)])['total']
    @classmethod
    def insert(cls, obj, update=True, method='SINGLE', maxsize=CFG._BUFFER):
        # Insert one row (method='SINGLE', committed immediately) or buffer
        # rows for a later bulk insert flushed once the buffer outgrows
        # `maxsize`. update=True emits an upsert ("on duplicate key update").
        if cls.__lock is None:
            cls.__lock = threading.Lock()
        if obj is not None:
            updatekeys = []
            # python-2 dict iteration; fill in defaults for missing columns
            # and collect the columns an upsert is allowed to overwrite.
            for k, v in cls.__mappings__.iteritems():
                if not hasattr(obj, k) and not isinstance(v, IdField):
                    setattr(obj, k, v.default)
                if update:
                    if not v.primary and v.updatable:
                        updatekeys.append(k)
            tid = obj.pop('tid', None)
            if '_id' in obj:
                obj['id'] = obj.pop('_id')
            # python-2: items() is a list; sort columns for a stable SQL shape.
            items = obj.items()
            items.sort(lambda x,y:cmp(x[0], y[0]))
            if tid:
                items.append(('tid', tid))
                obj['tid'] = tid
            if cls._insertsql is None or method == 'SINGLE':
                if update:
                    if 'id' in obj:
                        # NOTE(review): 'id' is removed from obj *after* items
                        # was built, so the generated SQL still includes the id
                        # column — confirm this is intended.
                        del obj['id']
                    cls._insertsql = 'insert into `%s` (%s) ' % (cls.__table__, ','.join('`'+one[0]+'`' for one in items)) + 'values (%s)' % ','.join('%s' for one in items) + ' on duplicate key update %s' % ','.join('`'+one+'`=values(`'+one+'`)' for one in updatekeys if not one == 'create_time')
                else:
                    cls._insertsql = 'insert ignore into `%s` (%s) ' % (cls.__table__, ','.join('`'+one[0]+'`' for one in items)) + 'values (%s)' % ','.join('%s' for one in items)
            one = tuple([i[1] for i in items])
        else:
            one = None
        if method == 'SINGLE':
            if one:
                try:
                    _id = dbpc.handler.insert(cls._insertsql, one, method)
                    dbpc.handler.commit()
                    return obj.get(cls.id_name, _id)
                except:
                    dbpc.handler.rollback()
                    raise
        else:
            # Buffered path: append under the class lock and flush in bulk
            # once the accumulated rows exceed maxsize bytes.
            with cls.__lock:
                if one is not None:
                    cls._insertdatas.append(one)
                if cls._insertdatas and sys.getsizeof(cls._insertdatas) > maxsize:
                    try:
                        dbpc.handler.insert(cls._insertsql, cls._insertdatas, method)
                        dbpc.handler.commit()
                    except:
                        dbpc.handler.rollback()
                        raise
                    finally:
                        cls._insertdatas = []
    @classmethod
    def delete(cls, spec):
        # Delete matching rows; an empty spec is rejected to avoid wiping
        # the whole table.
        if spec == {}:
            raise Exception("Wrong delete spec.")
        keys = []
        args = []
        rectify(cls, '_id', spec)
        where = transfer(spec, grand=None, parent='', index=keys, condition=args)
        dbpc.handler.delete('delete from `%s` where %s' % (cls.__table__, where), [args[index][one] for index, one in enumerate(keys)])
    @classmethod
    def update(cls, spec, doc):
        # Mongo-style update supporting only the $set and $inc operators.
        if spec == {}:
            raise Exception("Wrong update spec.")
        for k in doc:
            if not k in ('$set', '$inc'):
                raise Exception("Wrong update doc, only assist $set and $inc.")
        sets = doc.get('$set', {}).items()
        if sets:
            resets = [','.join('`'+one[0]+'`=%s' for one in sets)]
        else:
            resets = []
        incs = doc.get('$inc', {}).items()
        incs = ','.join('`%s`=`%s`+%d' % (one[0], one[0], one[1]) for one in incs)
        if incs:
            resets.append(incs)
        keys = []
        args = []
        rectify(cls, '_id', spec)
        where = transfer(spec, grand=None, parent='', index=keys, condition=args)
        dbpc.handler.update('update `%s` set %s where %s' % (cls.__table__, ','.join(resets), where), [one[1] for one in sets] + [args[index][one] for index, one in enumerate(keys)])
    @classmethod
    def init_table(cls):
        # Create the backing table from the generated DDL (see genDoc).
        doc = cls.genDoc()
        dbpc.handler.operate(doc)
# Import-only module: no command-line behavior is provided.
if __name__=='__main__':
    pass
| |
# encoding: utf-8
"""
Test suite for the docx.api module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from docx.api import Document
from docx.enum.text import WD_BREAK
from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from docx.package import Package
from docx.parts.document import DocumentPart, InlineShapes
from docx.parts.numbering import NumberingPart
from docx.parts.styles import StylesPart
from docx.section import Section
from docx.shape import InlineShape
from docx.table import Table
from docx.text import Paragraph, Run
from .unitutil.mock import (
instance_mock, class_mock, method_mock, property_mock, var_mock
)
class DescribeDocument(object):
    """Behavior tests for ``docx.api.Document``."""
    def it_opens_a_docx_on_construction(self, init_fixture):
        docx_, open_ = init_fixture
        document = Document(docx_)
        open_.assert_called_once_with(docx_)
        assert isinstance(document, Document)
    def it_can_open_a_docx_file(self, open_fixture):
        docx_, Package_, package_, document_part_ = open_fixture
        document_part, package = Document._open(docx_)
        Package_.open.assert_called_once_with(docx_)
        # BUG FIX: was `assert document_part is document_part` (a tautology
        # that could never fail); compare against the fixture mock instead.
        assert document_part is document_part_
        assert package is package_
    def it_opens_default_template_if_no_file_provided(
            self, Package_, default_docx_):
        Document._open(None)
        Package_.open.assert_called_once_with(default_docx_)
    def it_should_raise_if_not_a_Word_file(self, Package_, package_, docx_):
        package_.main_document.content_type = 'foobar'
        with pytest.raises(ValueError):
            Document._open(docx_)
    def it_can_add_a_heading(self, add_heading_fixture):
        document, add_paragraph_, paragraph_, text, level, style = (
            add_heading_fixture
        )
        paragraph = document.add_heading(text, level)
        add_paragraph_.assert_called_once_with(text, style)
        assert paragraph is paragraph_
    def it_should_raise_on_heading_level_out_of_range(self, document):
        with pytest.raises(ValueError):
            document.add_heading(level=-1)
        with pytest.raises(ValueError):
            document.add_heading(level=10)
    def it_can_add_a_paragraph(self, add_paragraph_fixture):
        document, document_part_, text, style, paragraph_ = (
            add_paragraph_fixture
        )
        paragraph = document.add_paragraph(text, style)
        document_part_.add_paragraph.assert_called_once_with(text, style)
        assert paragraph is paragraph_
    def it_can_add_a_page_break(self, add_page_break_fixture):
        document, document_part_, paragraph_, run_ = add_page_break_fixture
        paragraph = document.add_page_break()
        document_part_.add_paragraph.assert_called_once_with()
        paragraph_.add_run.assert_called_once_with()
        run_.add_break.assert_called_once_with(WD_BREAK.PAGE)
        assert paragraph is paragraph_
    def it_can_add_a_picture(self, add_picture_fixture):
        document, image_path_, width, height, run_, picture_ = (
            add_picture_fixture
        )
        picture = document.add_picture(image_path_, width, height)
        run_.add_picture.assert_called_once_with(image_path_, width, height)
        assert picture is picture_
    def it_can_add_a_section(self, add_section_fixture):
        document, start_type_, section_ = add_section_fixture
        section = document.add_section(start_type_)
        document._document_part.add_section.assert_called_once_with(
            start_type_
        )
        assert section is section_
    def it_can_add_a_table(self, add_table_fixture):
        document, rows, cols, style, document_part_, expected_style, table_ = (
            add_table_fixture
        )
        table = document.add_table(rows, cols, style)
        document_part_.add_table.assert_called_once_with(rows, cols)
        assert table.style == expected_style
        assert table == table_
    def it_provides_access_to_the_document_inline_shapes(self, document):
        body = document.inline_shapes
        assert body is document._document_part.inline_shapes
    def it_provides_access_to_the_document_paragraphs(
            self, paragraphs_fixture):
        document, paragraphs_ = paragraphs_fixture
        paragraphs = document.paragraphs
        assert paragraphs is paragraphs_
    def it_provides_access_to_the_document_sections(self, document):
        body = document.sections
        assert body is document._document_part.sections
    def it_provides_access_to_the_document_tables(self, tables_fixture):
        document, tables_ = tables_fixture
        tables = document.tables
        assert tables is tables_
    def it_can_save_the_package(self, save_fixture):
        document, package_, file_ = save_fixture
        document.save(file_)
        package_.save.assert_called_once_with(file_)
    def it_provides_access_to_the_numbering_part(self, num_part_get_fixture):
        document, document_part_, numbering_part_ = num_part_get_fixture
        numbering_part = document.numbering_part
        document_part_.part_related_by.assert_called_once_with(RT.NUMBERING)
        assert numbering_part is numbering_part_
    def it_creates_numbering_part_on_first_access_if_not_present(
            self, num_part_create_fixture):
        document, NumberingPart_, document_part_, numbering_part_ = (
            num_part_create_fixture
        )
        numbering_part = document.numbering_part
        NumberingPart_.new.assert_called_once_with()
        document_part_.relate_to.assert_called_once_with(
            numbering_part_, RT.NUMBERING
        )
        assert numbering_part is numbering_part_
    def it_provides_access_to_the_styles_part(self, styles_part_get_fixture):
        document, document_part_, styles_part_ = styles_part_get_fixture
        styles_part = document.styles_part
        document_part_.part_related_by.assert_called_once_with(RT.STYLES)
        assert styles_part is styles_part_
    def it_creates_styles_part_on_first_access_if_not_present(
            self, styles_part_create_fixture):
        document, StylesPart_, document_part_, styles_part_ = (
            styles_part_create_fixture
        )
        styles_part = document.styles_part
        StylesPart_.new.assert_called_once_with()
        document_part_.relate_to.assert_called_once_with(
            styles_part_, RT.STYLES
        )
        assert styles_part is styles_part_
    # fixtures -------------------------------------------------------
    @pytest.fixture(params=[
        ('', None),
        ('', 'Heading1'),
        ('foo\rbar', 'BodyText'),
    ])
    def add_paragraph_fixture(
            self, request, document, document_part_, paragraph_):
        text, style = request.param
        return document, document_part_, text, style, paragraph_
    @pytest.fixture(params=[0, 1, 2, 5, 9])
    def add_heading_fixture(
            self, request, document, add_paragraph_, paragraph_):
        level = request.param
        text = 'Spam vs. Bacon'
        style = 'Title' if level == 0 else 'Heading%d' % level
        return document, add_paragraph_, paragraph_, text, level, style
    @pytest.fixture
    def add_page_break_fixture(
            self, document, document_part_, paragraph_, run_):
        return document, document_part_, paragraph_, run_
    @pytest.fixture
    def add_picture_fixture(self, request, run_, picture_):
        document = Document()
        image_path_ = instance_mock(request, str, name='image_path_')
        width, height = 100, 200
        class_mock(request, 'docx.text.Run', return_value=run_)
        run_.add_picture.return_value = picture_
        return (document, image_path_, width, height, run_, picture_)
    @pytest.fixture
    def add_section_fixture(self, document, start_type_, section_):
        return document, start_type_, section_
    @pytest.fixture(params=[None, 'LightShading-Accent1', 'foobar'])
    def add_table_fixture(self, request, document, document_part_, table_):
        rows, cols = 4, 2
        style = expected_style = request.param
        return (
            document, rows, cols, style, document_part_, expected_style,
            table_
        )
    @pytest.fixture
    def init_fixture(self, docx_, open_):
        return docx_, open_
    @pytest.fixture
    def num_part_get_fixture(self, document, document_part_, numbering_part_):
        document_part_.part_related_by.return_value = numbering_part_
        return document, document_part_, numbering_part_
    @pytest.fixture
    def open_fixture(self, docx_, Package_, package_, document_part_):
        return docx_, Package_, package_, document_part_
    @pytest.fixture
    def paragraphs_fixture(self, document, paragraphs_):
        return document, paragraphs_
    @pytest.fixture
    def save_fixture(self, request, open_, package_):
        file_ = instance_mock(request, str)
        document = Document()
        return document, package_, file_
    @pytest.fixture
    def tables_fixture(self, document, tables_):
        return document, tables_
    # fixture components ---------------------------------------------
    @pytest.fixture
    def add_paragraph_(self, request, paragraph_):
        return method_mock(
            request, Document, 'add_paragraph', return_value=paragraph_
        )
    @pytest.fixture
    def default_docx_(self, request):
        return var_mock(request, 'docx.api._default_docx_path')
    @pytest.fixture
    def Document_inline_shapes_(self, request, inline_shapes_):
        return property_mock(
            request, Document, 'inline_shapes', return_value=inline_shapes_
        )
    @pytest.fixture
    def document(self, open_):
        return Document()
    @pytest.fixture
    def document_part_(
            self, request, paragraph_, paragraphs_, section_, table_,
            tables_):
        document_part_ = instance_mock(
            request, DocumentPart, content_type=CT.WML_DOCUMENT_MAIN
        )
        document_part_.add_paragraph.return_value = paragraph_
        document_part_.add_section.return_value = section_
        document_part_.add_table.return_value = table_
        document_part_.paragraphs = paragraphs_
        document_part_.tables = tables_
        return document_part_
    @pytest.fixture
    def docx_(self, request):
        return instance_mock(request, str)
    @pytest.fixture
    def inline_shapes_(self, request):
        return instance_mock(request, InlineShapes)
    @pytest.fixture
    def num_part_create_fixture(
            self, document, NumberingPart_, document_part_, numbering_part_):
        document_part_.part_related_by.side_effect = KeyError
        return document, NumberingPart_, document_part_, numbering_part_
    @pytest.fixture
    def NumberingPart_(self, request, numbering_part_):
        NumberingPart_ = class_mock(request, 'docx.api.NumberingPart')
        NumberingPart_.new.return_value = numbering_part_
        return NumberingPart_
    @pytest.fixture
    def numbering_part_(self, request):
        return instance_mock(request, NumberingPart)
    @pytest.fixture
    def open_(self, request, document_part_, package_):
        return method_mock(
            request, Document, '_open',
            return_value=(document_part_, package_)
        )
    @pytest.fixture
    def Package_(self, request, package_):
        Package_ = class_mock(request, 'docx.api.Package')
        Package_.open.return_value = package_
        return Package_
    @pytest.fixture
    def package_(self, request, document_part_):
        package_ = instance_mock(request, Package)
        package_.main_document = document_part_
        return package_
    @pytest.fixture
    def paragraph_(self, request, run_):
        paragraph_ = instance_mock(request, Paragraph)
        paragraph_.add_run.return_value = run_
        return paragraph_
    @pytest.fixture
    def paragraphs_(self, request):
        return instance_mock(request, list)
    @pytest.fixture
    def picture_(self, request):
        return instance_mock(request, InlineShape)
    @pytest.fixture
    def run_(self, request):
        return instance_mock(request, Run)
    @pytest.fixture
    def section_(self, request):
        return instance_mock(request, Section)
    @pytest.fixture
    def start_type_(self, request):
        return instance_mock(request, int)
    @pytest.fixture
    def StylesPart_(self, request, styles_part_):
        StylesPart_ = class_mock(request, 'docx.api.StylesPart')
        StylesPart_.new.return_value = styles_part_
        return StylesPart_
    @pytest.fixture
    def styles_part_(self, request):
        return instance_mock(request, StylesPart)
    @pytest.fixture
    def styles_part_create_fixture(
            self, document, StylesPart_, document_part_, styles_part_):
        document_part_.part_related_by.side_effect = KeyError
        return document, StylesPart_, document_part_, styles_part_
    @pytest.fixture
    def styles_part_get_fixture(self, document, document_part_, styles_part_):
        document_part_.part_related_by.return_value = styles_part_
        return document, document_part_, styles_part_
    @pytest.fixture
    def table_(self, request):
        return instance_mock(request, Table, style=None)
    @pytest.fixture
    def tables_(self, request):
        return instance_mock(request, list)
| |
import crosscat.cython_code.MultinomialComponentModel as mcm
import math
import random
import sys
import numpy
from scipy.misc import logsumexp as logsumexp
from scipy.special import gammaln as gammaln
next_seed = lambda : random.randrange(2147483647)
###############################################################################
# Input-checking and exception-handling functions
###############################################################################
def check_type_force_float(x, name):
    """
    Coerce ints to float and pass floats (or numpy.float64) through
    unchanged; any other type raises a TypeError naming *name*.
    """
    if type(x) is int:
        return float(x)
    if type(x) is float or type(x) is numpy.float64:
        return x
    raise TypeError("%s should be a float" % name)
def counts_to_data(counts):
    """
    Expand a vector of per-category counts into a shuffled (N, 1) float
    array of category labels, where N is the total of all counts.
    """
    assert type(counts) is list or type(counts) is numpy.ndarray
    num_categories = len(counts)
    total = int(sum(counts))
    data = []
    for label in range(num_categories):
        reps = 0
        # repeat each label counts[label] times
        while reps < counts[label]:
            data.append([label])
            reps += 1
        assert reps == counts[label]
    assert len(data) == total
    random.shuffle(data)
    return numpy.array(data, dtype=float)
def check_data_type_column_data(X):
    """
    Validate that X is a numpy array (or a list convertible to one) shaped
    as a single column; raises TypeError otherwise.
    """
    candidate = numpy.array(X) if type(X) is list else X
    if type(candidate) is not numpy.ndarray:
        raise TypeError("X should be type numpy.ndarray or a list")
    shape = candidate.shape
    if len(shape) == 2 and shape[1] > 1:
        raise TypeError("X should have a single column.")
def check_model_parameters_dict(model_parameters_dict):
    """Validate a multinomial model-parameters dict.

    The dict must contain a 'weights' key holding a flat list of floats
    that sums to 1.0 (within 1e-8).  Raises TypeError, KeyError, or
    ValueError on violations; returns None when valid.
    """
    if type(model_parameters_dict) is not dict:
        raise TypeError("model_parameters_dict should be a dict")
    keys = ['weights']
    for key in keys:
        if key not in model_parameters_dict.keys():
            raise KeyError("model_parameters_dict should have key %s" % key)
    # BUG FIX: .items() replaces the python-2-only .iteritems(); it behaves
    # identically here and also works on python 3.
    for key, value in model_parameters_dict.items():
        if key == "weights":
            if type(value) is not list:
                raise TypeError("model parameters dict key 'weights' should be a list")
            if type(value[0]) is list:
                raise TypeError("weights should not be a list of lists, should be a list of floats")
            if math.fabs(sum(value) - 1.0) > .00000001:
                raise ValueError("model parameters dict key 'weights' should sum to 1.0")
        else:
            raise KeyError("invalid key, %s, for model parameters dict" % key)
def check_hyperparameters_dict(hyperparameters_dict):
    """Validate a multinomial hyperparameters dict.

    Requires 'dirichlet_alpha' (a positive float or int) and 'K' (a
    positive int); an optional 'fixed' key is tolerated.  Raises
    KeyError, TypeError, or ValueError on violations.
    """
    # 'fixed' key is not necessary for user-defined hyperparameters
    keys = ['dirichlet_alpha', 'K']
    for key in keys:
        if key not in hyperparameters_dict.keys():
            raise KeyError("hyperparameters_dict should have key %s" % key)
    # BUG FIX: .items() replaces the python-2-only .iteritems(); it behaves
    # identically here and also works on python 3.
    for key, value in hyperparameters_dict.items():
        if key == "K":
            if type(value) is not int:
                raise TypeError("hyperparameters dict entry K should be an int")
            if value < 1:
                raise ValueError("hyperparameters dict entry K should be greater than 0")
        elif key == "dirichlet_alpha":
            if type(value) is not float \
                    and type(value) is not numpy.float64 \
                    and type(value) is not int:
                raise TypeError("hyperparameters dict entry dirichlet_alpha should be a float or int")
            if value <= 0.0:
                raise ValueError("hyperparameters dict entry dirichlet_alpha should be greater than 0")
        elif key == "fixed":
            pass
        else:
            raise KeyError("invalid key, %s, for hyperparameters dict" % key)
def check_data_vs_k(X, K):
    """Raise ValueError when X contains more distinct values than K.

    X may be a numpy array (flattened before counting) or any iterable of
    hashable values.
    """
    if type(X) is numpy.ndarray:
        # BUG FIX: the original called X.flatten(1); modern numpy requires a
        # string order argument, and ordering is irrelevant for counting the
        # distinct values of a column vector.
        X = X.flatten()
        X = X.tolist()
    K_data = len(set(X))
    if K_data > K:
        raise ValueError("the number of items in the data is greater than K")
###############################################################################
# The class extension
###############################################################################
class p_MultinomialComponentModel(mcm.p_MultinomialComponentModel):
model_type = 'symmetric_dirichlet_discrete'
cctype = 'multinomial'
    @classmethod
    def from_parameters(cls, N, params=None, hypers=None, gen_seed=0):
        """
        Initialize a multinomial component model with sufficient statistics
        generated from random data.
        Inputs:
            N: the number of data points
            params: a dict with the following keys
                weights: a K-length list that sums to 1.0
            hypers: a dict with the following keys
                K: the number of categories
                dirichlet_alpha: Dirichlet alpha parameter. The distribution is
                symmetric so only one value is needed
            gen_seed: an integer from which the rng is seeded
        """
        if type(N) is not int:
            raise TypeError("N should be an int")
        if type(gen_seed) is not int:
            raise TypeError("gen_seed should be an int")
        # if the parameters dict or the hypers dict exist, validate them
        if params is not None:
            check_model_parameters_dict(params)
        if hypers is not None:
            check_hyperparameters_dict(hypers)
        random.seed(gen_seed)
        numpy.random.seed(gen_seed)
        # get the number of categories
        if params is None:
            # With no params: default K to N/2 unless hypers specify it,
            # then draw random weights and normalise them to sum to 1.
            if hypers is None:
                K = int(N/2.0)
            else:
                K = int(hypers['K'])
            weights = numpy.random.random((1,K))
            weights = weights/numpy.sum(weights)
            weights = weights.tolist()[0]
            assert len(weights) == K
            params = dict(weights=weights)
            check_model_parameters_dict(params)
        else:
            K = len(params['weights'])
            if hypers:
                if K != hypers['K']:
                    raise ValueError("K in params does not match K in hypers")
        # generate synthetic data
        counts = numpy.array(numpy.random.multinomial(N, params['weights']), dtype=int)
        X = counts_to_data(counts)
        check_data_type_column_data(X)
        # generate the sufficient statistics
        suffstats = dict()
        for k in range(K):
            suffstats[str(k)] = counts[k]
        if hypers is None:
            hypers = cls.draw_hyperparameters(X, n_draws=1, gen_seed=next_seed())[0]
            check_hyperparameters_dict(hypers)
        # hypers['K'] = check_type_force_float(hypers['K'], "hypers['K']")
        hypers['dirichlet_alpha'] = check_type_force_float(hypers['dirichlet_alpha'], "hypers['dirichlet_alpha']")
        # add fixed parameter to hyperparameters
        hypers['fixed'] = 0.0
        suffstats = {'counts':suffstats}
        return cls(hypers, float(N), **suffstats)
@classmethod
def from_data(cls, X, hypers=None, gen_seed=0):
"""
Initialize a continuous component model with sufficient statistics
generated from data X
Inputs:
X: a column of data (numpy)
hypers: a dict with the following keys
K: the number of categories
dirichlet_alpha: Dirichlet alpha parameter. The distribution is
symmetric so only one value is needed
gen_seed: a int to seed the rng
"""
# FIXME: Figure out a wat to accept a list of strings
check_data_type_column_data(X)
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
random.seed(gen_seed)
numpy.random.seed(gen_seed)
if hypers is None:
hypers = cls.draw_hyperparameters(X, gen_seed=next_seed())[0]
check_hyperparameters_dict(hypers)
else:
check_hyperparameters_dict(hypers)
K = hypers['K']
check_data_vs_k(X,K)
hypers['dirichlet_alpha'] = check_type_force_float(hypers['dirichlet_alpha'], "hypers['dirichlet_alpha']")
N = len(X)
K = hypers['K']
counts = [0]*K
for x in X:
try:
counts[int(x)] += 1
except IndexError:
raise IndexError
# generate the sufficient statistics
suffstats = dict()
for k in range(int(K)):
suffstats[str(k)] = counts[k]
suffstats = {'counts':suffstats}
hypers['fixed'] = 0.0
return cls(hypers, float(N), **suffstats)
def sample_parameters_given_hyper(self, gen_seed=0):
"""
Samples weights given the current hyperparameters
Inputs:
gen_seed: integer used to seed the rng
"""
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
random.seed(gen_seed)
numpy.random.seed(gen_seed)
hypers = self.get_hypers()
dirichlet_alpha = hypers['dirichlet_alpha']
K = hypers['K']
alpha = numpy.array([dirichlet_alpha]*int(K))
weights = numpy.random.dirichlet(alpha)
weights = weights.tolist()
params = {'weights': weights}
return params
    def uncollapsed_likelihood(self, X, params):
        """
        Score the data X under explicit category weights: the multinomial
        log likelihood of X plus the symmetric-Dirichlet log prior density
        of the weights under the current dirichlet_alpha hyperparameter.
        Inputs:
            X: A column of data (numpy)
            params: a dict with the following keys
                weights: a list of category weights (should sum to 1)
        """
        check_data_type_column_data(X)
        check_model_parameters_dict(params)
        hypers = self.get_hypers()
        assert len(params['weights']) == int(hypers['K'])
        dirichlet_alpha = hypers['dirichlet_alpha']
        K = float(hypers['K'])
        check_data_vs_k(X,K)
        weights = numpy.array(params['weights'])
        log_likelihood = self.log_likelihood(X, params)
        # log of the Dirichlet normalising constant B(alpha) for a symmetric
        # alpha vector of length K
        logB = gammaln(dirichlet_alpha)*K - gammaln(dirichlet_alpha*K)
        log_prior = -logB + numpy.sum((dirichlet_alpha-1.0)*numpy.log(weights))
        log_p = log_likelihood + log_prior
        return log_p
@staticmethod
def log_likelihood(X, params):
"""
Calculates the log likelihood of the data X given mean mu and precision
rho.
Inputs:
X: a column of data (numpy)
params: a dict with the following keys
weights: a list of categories weights (should sum to 1)
"""
check_data_type_column_data(X)
check_model_parameters_dict(params)
N = len(X)
K = len(params['weights'])
check_data_vs_k(X,K)
counts= numpy.bincount(X,minlength=K)
weights = numpy.array(params['weights'])
A = gammaln(N+1)-numpy.sum(gammaln(counts+1))
B = numpy.sum(counts*numpy.log(weights));
log_likelihood = A+B
return log_likelihood
@staticmethod
def log_pdf(X, params):
"""
Calculates the log pdf of the data X given mean mu and precision
rho.
Inputs:
X: a column of data (numpy)
params: a dict with the following keys
weights: a list of categories weights (should sum to 1)
"""
check_data_type_column_data(X)
check_model_parameters_dict(params)
N = len(X)
weights = numpy.array(params['weights'])
lpdf = []
for x in X:
w = weights[int(x)]
if w == 0.0 or w == 0:
lpdf.append(float('-Inf'))
else:
lpdf.append(math.log(w))
return numpy.array(lpdf)
def brute_force_marginal_likelihood(self, X, n_samples=10000, gen_seed=0):
"""
Calculates the log marginal likelihood via brute force method in which
parameters (weights) are repeatedly drawn from the prior, the
likelihood is calculated for each set of parameters, then the average is
taken.
Inputs:
X: A column of data (numpy)
n_samples: the number of draws
gen_Seed: seed for the rng
"""
check_data_type_column_data(X)
if type(n_samples) is not int:
raise TypeError("n_samples should be an int")
if n_samples <= 0:
raise ValueError("n_samples should be greater than 0")
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
hypers = self.get_hypers()
K = hypers['K']
check_data_vs_k(X,K)
N = float(len(X))
random.seed(gen_seed)
log_likelihoods = [0]*n_samples
for i in range(n_samples):
params = self.sample_parameters_given_hyper(gen_seed=next_seed())
log_likelihoods[i] = self.log_likelihood(X, params)
log_marginal_likelihood = logsumexp(log_likelihoods) - math.log(N)
return log_marginal_likelihood
    @staticmethod
    def generate_discrete_support(params):
        """
        Returns the sequential range of category indices, 0..K-1, where K is
        the number of category weights.
        Inputs:
            params: a dict with the entry 'weights'
        """
        check_model_parameters_dict(params)
        return range(len(params['weights']))
@staticmethod
def draw_hyperparameters(X, n_draws=1, gen_seed=0):
"""
Draws hyperparameters dirichlet_alpha from the same distribution that
generates the grid in the C++ code.
Inputs:
X: a column of data or an int which acts as K. If a data array is
provided, K is assumed to be max(X)+1
n_draws: the number of draws
gen_seed: seed the rng
Output:
A list of dicts of draws where each entry has keys 'dirichlet_alpha'
and 'K'. K is defined by the data and will be the same for each samples
"""
if type(X) is list or type(X) is numpy.ndarray:
check_data_type_column_data(X)
K = int(max(X)+1)
elif type(X) is int:
if X < 1:
raise ValueError("If X is an int, it should be greatert than 1")
K = X
else:
raise TypeError("X should be an array or int")
if type(n_draws) is not int:
raise TypeError("n_draws should be an int")
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
random.seed(gen_seed)
samples = []
# get draw ranges
alpha_draw_range = (0.1, math.log(K))
for i in range(n_draws):
alpha = math.exp(random.uniform(alpha_draw_range[0], alpha_draw_range[1]))
this_draw = dict(dirichlet_alpha=alpha, K=K)
samples.append(this_draw)
assert len(samples) == n_draws
return samples
@staticmethod
def generate_data_from_parameters(params, N, gen_seed=0):
"""
returns a set of intervals over which the component model pdf is
supported.
Inputs:
params: a dict with entries 'weights'
N: number of data points
"""
if type(N) is not int:
raise TypeError("N should be an int")
if N <= 0:
raise ValueError("N should be greater than 0")
if type(params) is not dict:
raise TypeError("params should be a dict")
check_model_parameters_dict(params)
# multinomial draw
counts = numpy.array(numpy.random.multinomial(N, params['weights']), dtype=int)
X = counts_to_data(counts)
assert len(X) == N
return X
| |
# The MIT License (MIT)
# Copyright (c) 2013 Dominic May
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
contains some functions
required by both main.py and humans.py
"""
# this could be dangerous D:
# NOTE: duplicate `division` entry removed from the tuple below.
from __future__ import (
    absolute_import,
    division,
    generators,
    nested_scopes,
    print_function,
    with_statement
)
import os
import json
import urllib
import base64
import hashlib
import logging
from operator import itemgetter
# google appengine imports
import webapp2
from google.appengine.api import mail
from google.appengine.api import users
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext.webapp import template
# for debugging exceptions
import sys
import traceback
# application specific
from module_utils import get_hardware_data, get_module_data
# authentication data
# GitHub API client credentials: try memcache first, fall back to reading
# auth_data.json from disk on a cache miss.
# NOTE(review): the freshly loaded value is never written back to memcache
# here — confirm that is intended.
client_auth_data = memcache.get('client_auth_data')
if not client_auth_data:
    with open('auth_data.json', 'r') as fh:
        auth_data = json.load(fh)
    client_auth_data = auth_data["client_auth_data"]
def _get_live_data(handler, fragment):
    """
    Fetch fragment['url'] via get_url_content and return the
    base64-decoded module['content'].
    """
    module = get_url_content(handler, fragment['url'])
    assert 'content' in module
    encoded_content = module['content']
    return base64.b64decode(encoded_content)
def get_live_hardware_data(handler, fragment):
    """
    Given a get_tree fragment, return its hardware data as a python dict.
    """
    raw_data = _get_live_data(handler, fragment)
    return get_hardware_data(raw_data)
def get_live_module_data(handler, fragment):
    """
    Given a get_tree fragment, return its module data as a python dict.
    """
    raw_data = _get_live_data(handler, fragment)
    return get_module_data(raw_data)
def _get_tree(handler=None):
    """
    Return the repository file hierarchy/tree from the GitHub API.
    """
    tree_url = 'https://api.github.com/repos/DCPUTeam/DCPUModules/git/trees/master'
    result = get_url_content(handler, tree_url)
    assert result['tree'], result
    return result['tree']
def get_modules(handler=None):
    """
    Return the file hierarchy/tree entries whose path ends in .lua.
    """
    lua_fragments = []
    for fragment in _get_tree(handler):
        if fragment['path'].endswith('.lua'):
            lua_fragments.append(fragment)
    return lua_fragments
def get_module_names(handler):
    """
    Return a list of the file names (path basenames) of all modules.
    """
    modules = get_modules(handler)
    # Single pass, always materialized as a list. The original chained two
    # map() calls, which under Python 3 returns a lazy iterator (so len() or
    # indexing by callers would break) and iterates the data twice.
    return [rpart(module['path']) for module in modules]
def get_url_content(handler, url):
    """
    A wrapper around authed_fetch_json; caches results in memcache (keyed by
    the md5 of the url) to help keep wait time short.

    On a GitHub download error, reports HTTP 408 via the handler and returns
    an empty list instead of raising.
    """
    url_hash = md5_hash(url)
    result = memcache.get(url_hash)
    if result is None:
        logging.info('Getting the result from the GitHub API')
        try:
            result = authed_fetch_json(url)
        except urlfetch.DownloadError as e:
            logging.error(e)
            # NOTE(review): handler may be None here (see _get_tree's default)
            # — confirm callers always pass a handler on this path.
            handler.error(408)
            return []
        else:
            # Only successful fetches are cached.
            memcache.set(url_hash, result)
    else:
        logging.info('Memcache get successful; %.40s' % result)
    # check if the api limit has been reached
    assert not result.get('message', '').startswith(
        'API Rate Limit Exceeded for'), 'API Limit reached'
    return result
def authed_fetch(url, headers=None):
    """
    Fetch url with the client auth credentials appended as query parameters,
    recording the remaining GitHub rate limit quota in memcache.
    """
    # add admin contact, auth_data
    headers = headers or {}
    headers.update({'X-Admin-Contact': 'admin@lysdev.com'})
    # build the url: append auth credentials as query parameters
    url += '&' if '?' in url else '?'
    url += urllib.urlencode(client_auth_data)
    r = urlfetch.fetch(url=url, headers=headers)
    # Track how many API requests remain this hour (header may be absent).
    remaining = r.headers.get('x-ratelimit-remaining')
    if remaining:
        logging.info('{} requests remaining for this hour.'.format(remaining))
        memcache.set('requests_remaining', int(remaining))
    else:
        logging.info(
            'Could not determine number of requests remaining for this hour')
        logging.info(r.content)
    return r
def authed_fetch_json(*args, **kwargs):
    """
    Fetch via authed_fetch and parse the response body as JSON.
    """
    response = authed_fetch(*args, **kwargs)
    return json.loads(response.content)
class BaseRequestHandler(webapp2.RequestHandler):
    """Request handler base that logs and emails tracebacks on unhandled
    exceptions in production, and renders templates via dorender()."""

    def handle_exception(self, exception, debug_mode):
        # In development, defer to webapp2's default debug page.
        if development():
            return super(BaseRequestHandler, self).handle_exception(exception, debug_mode)
        lines = ''.join(traceback.format_exception(*sys.exc_info()))
        logging.error(lines)
        template_values = {
            'traceback': lines.replace('\n', '<br/>')
        }
        html = self.dorender('error.html', template_values, write=False)
        # Email the traceback (plain and HTML) to the maintainer.
        mail.send_mail(
            sender='debugging@dcputoolchain-module-site.appspotmail.com',
            to="jack.thatch@gmail.com",
            subject='Caught Exception',
            body=lines,
            html=html)
        # Admins see the raw exception; everyone else gets a 500 page.
        if users.is_current_user_admin():
            raise exception
        else:
            self.error(500)
            if isinstance(exception, AssertionError):
                self.dorender('unexpected_result.html', {})

    def dorender(self, tname='base.html', values=None, write=True):
        """
        Render template `tname` with `values`; write the result to the
        response when `write` is True, otherwise return the rendered text.
        """
        self.response.headers['Content-Type'] = 'text/html'
        path = os.path.join(os.path.dirname(__file__), 'templates/' + tname)
        data = template.render(path, values or {})
        if write:
            self.response.out.write(data)
        else:
            return data
def development():
    """Return True when running under the App Engine development server."""
    return os.environ['SERVER_SOFTWARE'].startswith('Development')
def rpart(path):
    """Return the final component of a '/'-separated path."""
    _, _, tail = path.rpartition('/')
    return tail
def md5_hash(string):
    """Return the hex MD5 digest of the given string."""
    digest = hashlib.md5(string)
    return digest.hexdigest()
| |
import json
import logging
import requests
from requests import exceptions, Session
from python_kemptech_api import utils
from .api_xml import (
is_successful,
get_data,
get_error_msg)
from .exceptions import (
KempTechApiException,
ConnectionTimeoutException,
GenericObjectMissingLoadMasterInfo,
UnauthorizedAccessError)
from .utils import UseTlsAdapter, send_response
# Suppress urllib3 warnings (requests below are made with verify=False).
requests.packages.urllib3.disable_warnings()
log = logging.getLogger(__name__)
logging.basicConfig()
class HttpClient(object):
    """Client that performs HTTP requests."""

    ip_address = None
    endpoint = None

    def __init__(self, tls_version=utils.DEFAULT_TLS_VERSION, cert=None,
                 user=None, password=None):
        """Create a client.

        :param tls_version: TLS version used by the mounted session adapter.
        :param cert: Client certificate; when set it replaces basic auth.
        :param user: Basic auth user name.
        :param password: Basic auth password.
        """
        self.cert = cert
        self.auth = (user, password)
        self._tls_version = tls_version
        self._tls_session = Session()
        self._tls_session.mount("http://", UseTlsAdapter(self._tls_version))
        # Number of automatic retries allowed for timeouts/connection errors.
        self._retry_count = 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._tls_session.close()
        return False

    def _get_basic_auth(self):
        # A client certificate takes precedence over the (possibly
        # (None, None)) basic auth tuple.
        if self.cert:
            return None
        else:
            return self.auth

    def _do_request(self, http_method, rest_command,
                    parameters=None, file=None, data=None,
                    headers=None, retries=0):
        """Perform a HTTP request.

        :param http_method: GET or POST.
        :param rest_command: The command to run.
        :param parameters: dict containing parameters.
        :param file: Location of file to send.
        :param data: Raw request body to send.
        :param headers: dict of extra HTTP headers.
        :param retries: Number of retries attempted so far.
        :return: The response text body.
        """
        cmd_url = "{endpoint}{cmd}?".format(endpoint=self.endpoint,
                                            cmd=rest_command)
        log.debug(cmd_url)

        # If a certificate has been specified,
        # use that instead of the potentially (None, None) auth tuple
        auth = self._get_basic_auth()

        try:
            if file is not None:
                with open(file, 'rb') as payload:
                    response = self._tls_session.request(http_method, cmd_url,
                                                         params=parameters,
                                                         verify=False,
                                                         data=payload,
                                                         headers=headers,
                                                         cert=self.cert,
                                                         auth=auth)
            else:
                response = self._tls_session.request(http_method,
                                                     cmd_url,
                                                     params=parameters,
                                                     data=data,
                                                     timeout=utils.TIMEOUT,
                                                     verify=False,
                                                     headers=headers,
                                                     cert=self.cert,
                                                     auth=auth)
            self._tls_session.close()
            # Raise specific error for authentication failure
            if response.status_code == 401:
                log.warning("Cannot authenticate to %s check that the "
                            "credentials are correct.", self.ip_address)
                raise UnauthorizedAccessError(self.ip_address,
                                              response.status_code)
            # Raise generic error for other API failures
            if 400 < response.status_code < 500:
                raise KempTechApiException(msg=response.text,
                                           code=response.status_code)
            else:
                response.raise_for_status()
        except exceptions.ConnectTimeout:
            log.error("The connection timed out to %s.",
                      self.ip_address)
            raise ConnectionTimeoutException(self.ip_address)
        except (exceptions.ReadTimeout, exceptions.ConnectionError) as e:
            if retries < self._retry_count:
                log.warning("A %s occurred to %s.", e.__class__.__name__,
                            self.ip_address)
                # BUG FIX: previously `retries + 1` was passed positionally
                # into the `data` parameter, which dropped the request body,
                # lost data/headers, and never incremented the retry counter.
                return self._do_request(http_method,
                                        rest_command,
                                        parameters=parameters,
                                        file=file,
                                        data=data,
                                        headers=headers,
                                        retries=retries + 1)
            else:
                log.warning("A repeated %s occurred to %s.",
                            e.__class__.__name__, self.ip_address)
                raise
        except exceptions.URLRequired:
            log.error("%s is an invalid URL", cmd_url)
            raise
        except exceptions.TooManyRedirects:
            log.error("Too many redirects with request to %s.", cmd_url)
            raise
        except exceptions.Timeout:
            log.error("A connection %s has timed out.", self.ip_address)
            raise
        except exceptions.HTTPError:
            log.error("A HTTP error occurred with request to %s.", cmd_url)
            raise KempTechApiException(msg=response.text,
                                       code=response.status_code)
        except exceptions.RequestException:
            log.error("An error occurred with request to %s.", cmd_url)
            raise
        return response.text

    def _get(self, rest_command, parameters=None, headers=None):
        return self._do_request('GET', rest_command, parameters,
                                headers=headers)

    def _post(self, rest_command, file=None, parameters=None, headers=None, data=None):
        # BUG FIX: `headers` was accepted but never forwarded.
        return self._do_request('POST', rest_command, parameters=parameters,
                                file=file, data=data, headers=headers)
class AccessInfoMixin(object):
    """Mixin exposing an object's connection details as a single dict."""

    endpoint = None
    ip_address = None
    cert = None
    auth = None

    @property
    def access_info(self):
        """Return endpoint/credential info plus a reference to self."""
        return {
            "endpoint": self.endpoint,
            "ip_address": self.ip_address,
            "cert": self.cert,
            "auth": self.auth,
            "appliance": self,
        }
class BaseKempObject(HttpClient, AccessInfoMixin):
    """Base class for KEMP API objects backed by a LoadMaster appliance.

    Subclasses override the _API_* command templates and attribute maps.
    """
    # API command templates, overridden by subclasses.
    _API_ADD = ""
    _API_MOD = ""
    _API_DELETE = ""
    _API_GET = ""
    _API_LIST = ""
    # XML tag wrapping this object's data in API responses.
    API_TAG = ""
    # Parameters required to construct an instance.
    API_INIT_PARAMS = {}
    # Parameters identifying this object in every API call.
    _API_BASE_PARAMS = {}
    # Mapping of python attribute name -> API response tag.
    _API_DEFAULT_ATTRIBUTES = {}
    # Blacklist attributes that shouldn't be pushed to the loadmaster.
    _API_IGNORE = (
        "log_urls", "ip_address", "endpoint", "rsindex", "vsindex", "index",
        "status", "subvs_data", "subvs_entries", "real_servers", "cert",
        "checkuse1_1", "mastervsid", "API_INIT_PARAMS", "API_TAG", "auth"
    )

    def __init__(self, loadmaster_info, **kwargs):
        """Initialize from a loadmaster access_info dict.

        :param loadmaster_info: dict with at least 'endpoint', 'ip_address'
            and 'auth' keys; 'cert' is optional.
        :raises GenericObjectMissingLoadMasterInfo: when a required key
            is absent.
        """
        try:
            self.endpoint = loadmaster_info["endpoint"]
        except KeyError:
            raise GenericObjectMissingLoadMasterInfo(type(self), "endpoint")
        try:
            self.ip_address = loadmaster_info["ip_address"]
        except KeyError:
            raise GenericObjectMissingLoadMasterInfo(type(self), "ip_address")
        try:
            self.auth = loadmaster_info["auth"]
        except KeyError:
            raise GenericObjectMissingLoadMasterInfo(type(self), "auth")
        cert = loadmaster_info.get("cert")
        super(BaseKempObject, self).__init__(cert=cert,
                                             user=self.auth[0], password=self.auth[1])

    def __repr__(self):
        return '{} {}'.format(
            self.__class__.__name__,
            json.dumps(self.to_dict()))

    def _is_successful_or_raise(self, response):
        # On success, refresh our attributes from the response payload;
        # otherwise surface the API error message.
        if is_successful(response):
            data = get_data(response)
            self.populate_default_attributes(data)
        else:
            raise KempTechApiException(get_error_msg(response))

    @property
    def access_info(self):
        # Extend the mixin's info with this object's identifying parameters.
        info = super(BaseKempObject, self).access_info
        info.update(self._get_base_parameters())
        return info

    def save(self, update=False):
        """Create (or, with update=True, modify) this object on the API."""
        if not update:
            response = self._get(self._API_ADD, self.to_api_dict())
        else:
            response = self._get(self._API_MOD, self.to_api_dict())
        self._is_successful_or_raise(response)

    def update(self):
        """Push local attribute changes to the LoadMaster."""
        self.save(update=True)

    def refresh(self):
        """Reload this object's attributes from the LoadMaster."""
        response = self._get(
            self._API_GET,
            self._get_base_parameters())
        xml_object = get_data(response)
        # NOTE: the line below will fail with a ValidationError if the
        # response is empty.
        self.populate_default_attributes(xml_object)

    def delete(self):
        """Delete this object on the LoadMaster and return the response."""
        response = self._get(self._API_DELETE, self._get_base_parameters())
        return send_response(response)

    def to_api_dict(self):
        """Returns API related attributes as a dict

        Ignores attributes listed in _API_IGNORE and also attributes
        beginning with an underscore (_). Also ignore values of None"""
        api_dict = {}
        for key, value in self.__dict__.items():
            if (key in self._API_IGNORE or key.startswith("_") or
                    value is None):
                continue
            api_dict[key] = value
        return api_dict

    def to_dict(self):
        """returns attributes whose values are not None or whose name starts
        with _ as a dict"""
        api_dict = {}
        for key, value in self.__dict__.items():
            if key.startswith("_") or value is None:
                continue
            api_dict[key] = value
        return api_dict

    def _get_base_parameters(self):
        """Returns the bare minimum parameters."""
        base_parameters = {}
        for parameter in self._API_BASE_PARAMS:
            base_parameters[parameter] = self.__getattribute__(parameter)
        return base_parameters

    def populate_default_attributes(self, params):
        """Populate object instance with standard defaults"""
        if len(params) == 0:
            log.warning("No data was returned, leaving data intact")
            return
        # Unwrap a single-key payload nested under this object's API tag.
        if len(params) == 1:
            if self.API_TAG in params.keys():
                self.populate_default_attributes(params[self.API_TAG])
                return
        # Missing tags reset the attribute to None.
        for attribute, tag in self._API_DEFAULT_ATTRIBUTES.items():
            setattr(self, attribute, params.get(tag, None))
| |
from __future__ import absolute_import
import datetime
import ujson
import zlib
from django.utils.translation import ugettext as _
from six import binary_type
from typing import Text
from zerver.lib.avatar import get_avatar_url
from zerver.lib.avatar_hash import gravatar_hash
import zerver.lib.bugdown as bugdown
from zerver.lib.cache import cache_with_key, to_dict_cache_key
from zerver.lib.request import JsonableError
from zerver.lib.str_utils import force_bytes, dict_with_str_keys
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.models import (
get_display_recipient_by_id,
Message,
Recipient,
Stream,
UserProfile,
UserMessage,
Reaction
)
from typing import Any, Dict, List, Optional, Tuple, Text
# Mapping of user_id -> that user's list of alert words.
RealmAlertWords = Dict[int, List[Text]]
def extract_message_dict(message_bytes):
    # type: (binary_type) -> Dict[str, Any]
    """Decompress and deserialize a cached message dict."""
    json_text = zlib.decompress(message_bytes).decode("utf-8")
    return dict_with_str_keys(ujson.loads(json_text))
def stringify_message_dict(message_dict):
    # type: (Dict[str, Any]) -> binary_type
    """Serialize and compress a message dict for the cache."""
    serialized = ujson.dumps(message_dict)
    return zlib.compress(force_bytes(serialized))
def message_to_dict(message, apply_markdown):
    # type: (Message, bool) -> Dict[str, Any]
    """Return the (possibly cached) dict form of a message."""
    compressed = message_to_dict_json(message, apply_markdown)
    return extract_message_dict(compressed)
# Cached for 24 hours, keyed via to_dict_cache_key; returns the compressed,
# serialized form of the message dict.
@cache_with_key(to_dict_cache_key, timeout=3600*24)
def message_to_dict_json(message, apply_markdown):
    # type: (Message, bool) -> binary_type
    return MessageDict.to_dict_uncached(message, apply_markdown)
class MessageDict(object):
    """Builds the wire/cache dict representation of a Message, either from a
    full ORM object or from a raw .values() row."""

    @staticmethod
    def to_dict_uncached(message, apply_markdown):
        # type: (Message, bool) -> binary_type
        # Build the dict and return it in compressed, serialized form.
        dct = MessageDict.to_dict_uncached_helper(message, apply_markdown)
        return stringify_message_dict(dct)

    @staticmethod
    def to_dict_uncached_helper(message, apply_markdown):
        # type: (Message, bool) -> Dict[str, Any]
        # Flatten the ORM object into the keyword form build_message_dict
        # expects.
        return MessageDict.build_message_dict(
            apply_markdown = apply_markdown,
            message = message,
            message_id = message.id,
            last_edit_time = message.last_edit_time,
            edit_history = message.edit_history,
            content = message.content,
            subject = message.subject,
            pub_date = message.pub_date,
            rendered_content = message.rendered_content,
            rendered_content_version = message.rendered_content_version,
            sender_id = message.sender.id,
            sender_email = message.sender.email,
            sender_realm_id = message.sender.realm_id,
            sender_realm_domain = message.sender.realm.domain,
            sender_full_name = message.sender.full_name,
            sender_short_name = message.sender.short_name,
            sender_avatar_source = message.sender.avatar_source,
            sender_is_mirror_dummy = message.sender.is_mirror_dummy,
            sending_client_name = message.sending_client.name,
            recipient_id = message.recipient.id,
            recipient_type = message.recipient.type,
            recipient_type_id = message.recipient.type_id,
            reactions = Reaction.get_raw_db_rows([message.id])
        )

    @staticmethod
    def build_dict_from_raw_db_row(row, apply_markdown):
        # type: (Dict[str, Any], bool) -> Dict[str, Any]
        '''
        row is a row from a .values() call, and it needs to have
        all the relevant fields populated
        '''
        return MessageDict.build_message_dict(
            apply_markdown = apply_markdown,
            message = None,
            message_id = row['id'],
            last_edit_time = row['last_edit_time'],
            edit_history = row['edit_history'],
            content = row['content'],
            subject = row['subject'],
            pub_date = row['pub_date'],
            rendered_content = row['rendered_content'],
            rendered_content_version = row['rendered_content_version'],
            sender_id = row['sender_id'],
            sender_email = row['sender__email'],
            sender_realm_id = row['sender__realm__id'],
            sender_realm_domain = row['sender__realm__domain'],
            sender_full_name = row['sender__full_name'],
            sender_short_name = row['sender__short_name'],
            sender_avatar_source = row['sender__avatar_source'],
            sender_is_mirror_dummy = row['sender__is_mirror_dummy'],
            sending_client_name = row['sending_client__name'],
            recipient_id = row['recipient_id'],
            recipient_type = row['recipient__type'],
            recipient_type_id = row['recipient__type_id'],
            reactions=row['reactions']
        )

    @staticmethod
    def build_message_dict(
            apply_markdown,
            message,
            message_id,
            last_edit_time,
            edit_history,
            content,
            subject,
            pub_date,
            rendered_content,
            rendered_content_version,
            sender_id,
            sender_email,
            sender_realm_id,
            sender_realm_domain,
            sender_full_name,
            sender_short_name,
            sender_avatar_source,
            sender_is_mirror_dummy,
            sending_client_name,
            recipient_id,
            recipient_type,
            recipient_type_id,
            reactions
    ):
        # type: (bool, Message, int, datetime.datetime, Text, Text, Text, datetime.datetime, Text, Optional[int], int, Text, int, Text, Text, Text, Text, bool, Text, int, int, int, List[Dict[str, Any]]) -> Dict[str, Any]
        # Assembles the client-facing message dict. May lazily fetch and
        # re-render the message when the cached rendered content is stale.
        avatar_url = get_avatar_url(sender_avatar_source, sender_email)

        display_recipient = get_display_recipient_by_id(
            recipient_id,
            recipient_type,
            recipient_type_id
        )

        # NOTE(review): an unrecognized recipient_type leaves display_type
        # unbound and raises a NameError below — confirm callers can never
        # pass anything but STREAM/HUDDLE/PERSONAL.
        if recipient_type == Recipient.STREAM:
            display_type = "stream"
        elif recipient_type in (Recipient.HUDDLE, Recipient.PERSONAL):
            assert not isinstance(display_recipient, Text)
            display_type = "private"
            if len(display_recipient) == 1:
                # add the sender in if this isn't a message between
                # someone and his self, preserving ordering
                recip = {'email': sender_email,
                         'domain': sender_realm_domain,
                         'full_name': sender_full_name,
                         'short_name': sender_short_name,
                         'id': sender_id,
                         'is_mirror_dummy': sender_is_mirror_dummy}
                # Keep the two participants ordered by email.
                if recip['email'] < display_recipient[0]['email']:
                    display_recipient = [recip, display_recipient[0]]
                elif recip['email'] > display_recipient[0]['email']:
                    display_recipient = [display_recipient[0], recip]

        obj = dict(
            id                = message_id,
            sender_email      = sender_email,
            sender_full_name  = sender_full_name,
            sender_short_name = sender_short_name,
            sender_domain     = sender_realm_domain,
            sender_id         = sender_id,
            type              = display_type,
            display_recipient = display_recipient,
            recipient_id      = recipient_id,
            subject           = subject,
            timestamp         = datetime_to_timestamp(pub_date),
            gravatar_hash     = gravatar_hash(sender_email), # Deprecated June 2013
            avatar_url        = avatar_url,
            client            = sending_client_name)

        if obj['type'] == 'stream':
            obj['stream_id'] = recipient_type_id
        obj['subject_links'] = bugdown.subject_links(sender_realm_id, subject)

        if last_edit_time != None:
            obj['last_edit_timestamp'] = datetime_to_timestamp(last_edit_time)
            obj['edit_history'] = ujson.loads(edit_history)

        if apply_markdown:
            if Message.need_to_render_content(rendered_content, rendered_content_version, bugdown.version):
                if message is None:
                    # We really shouldn't be rendering objects in this method, but there is
                    # a scenario where we upgrade the version of bugdown and fail to run
                    # management commands to re-render historical messages, and then we
                    # need to have side effects. This method is optimized to not need full
                    # blown ORM objects, but the bugdown renderer is unfortunately highly
                    # coupled to Message, and we also need to persist the new rendered content.
                    # If we don't have a message object passed in, we get one here. The cost
                    # of going to the DB here should be overshadowed by the cost of rendering
                    # and updating the row.
                    # TODO: see #1379 to eliminate bugdown dependencies
                    message = Message.objects.select_related().get(id=message_id)

                # It's unfortunate that we need to have side effects on the message
                # in some cases.
                rendered_content = render_markdown(message, content, realm_id=sender_realm_id)
                message.rendered_content = rendered_content
                message.rendered_content_version = bugdown.version
                message.save_rendered_content()

            if rendered_content is not None:
                obj['content'] = rendered_content
            else:
                obj['content'] = u'<p>[Zulip note: Sorry, we could not understand the formatting of your message]</p>'

            obj['content_type'] = 'text/html'
        else:
            obj['content'] = content
            obj['content_type'] = 'text/x-markdown'

        obj['reactions'] = [ReactionDict.build_dict_from_raw_db_row(reaction)
                            for reaction in reactions]
        return obj
class ReactionDict(object):
    @staticmethod
    def build_dict_from_raw_db_row(row):
        # type: (Dict[str, Any]) -> Dict[str, Any]
        """Convert a Reaction .values() row into its API representation."""
        user_info = {'email': row.get('user_profile__email'),
                     'id': row.get('user_profile__id'),
                     'full_name': row.get('user_profile__full_name')}
        return {'emoji_name': row.get('emoji_name'), 'user': user_info}
def re_render_content_for_management_command(message):
    # type: (Message) -> None
    '''
    Please avoid using this function, as its only used in a management command that
    is somewhat deprecated.

    Re-renders and persists the message's content; asserts the cached
    rendering really is stale before doing the work.
    '''
    assert Message.need_to_render_content(message.rendered_content,
                                          message.rendered_content_version,
                                          bugdown.version)

    rendered_content = render_markdown(message, message.content)
    message.rendered_content = rendered_content
    message.rendered_content_version = bugdown.version
    message.save_rendered_content()
def access_message(user_profile, message_id):
    # type: (UserProfile, int) -> Tuple[Message, UserMessage]
    """You can access a message by ID in our APIs that either:
    (1) You received or have previously accessed via starring
        (aka have a UserMessage row for).
    (2) Was sent to a public stream in your realm.

    We produce consistent, boring error messages to avoid leaking any
    information from a security perspective.

    Returns (message, user_message) where user_message may be None in
    the public-stream case; raises JsonableError on any access failure.
    """
    try:
        message = Message.objects.select_related().get(id=message_id)
    except Message.DoesNotExist:
        raise JsonableError(_("Invalid message(s)"))

    try:
        user_message = UserMessage.objects.select_related().get(user_profile=user_profile,
                                                                message=message)
    except UserMessage.DoesNotExist:
        user_message = None

    if user_message is None:
        if message.recipient.type != Recipient.STREAM:
            # You can't access private messages you didn't receive
            raise JsonableError(_("Invalid message(s)"))
        stream = Stream.objects.get(id=message.recipient.type_id)
        if not stream.is_public():
            # You can't access messages sent to invite-only streams
            # that you didn't receive
            raise JsonableError(_("Invalid message(s)"))
        # So the message is to a public stream
        if stream.realm != user_profile.realm:
            # You can't access public stream messages in other realms
            raise JsonableError(_("Invalid message(s)"))

    # Otherwise, the message must have been sent to a public
    # stream in your realm, so return the message, user_message pair
    return (message, user_message)
def render_markdown(message, content, realm_id=None, realm_alert_words=None, message_users=None):
    # type: (Message, Text, Optional[int], Optional[RealmAlertWords], Set[UserProfile]) -> Text
    """Return HTML for given markdown. Bugdown may add properties to the
    message object such as `mentions_user_ids` and `mentions_wildcard`.
    These are only on this Django object and are not saved in the
    database.

    Side effects on `message` (when not None): resets the mention/alert
    bookkeeping attributes before rendering and fills in
    user_ids_with_alert_words and is_me_message afterwards.
    """
    if message_users is None:
        message_user_ids = set() # type: Set[int]
    else:
        message_user_ids = {u.id for u in message_users}

    if message is not None:
        # Reset per-render bookkeeping that bugdown populates as a side
        # effect of convert().
        message.mentions_wildcard = False
        message.is_me_message = False
        message.mentions_user_ids = set()
        message.alert_words = set()
        message.links_for_preview = set()

        if realm_id is None:
            realm_id = message.sender.realm_id

    if message.sending_client.name == "zephyr_mirror" and message.sender.realm.is_zephyr_mirror_realm:
        # Use slightly customized Markdown processor for content
        # delivered via zephyr_mirror
        realm_id = bugdown.ZEPHYR_MIRROR_BUGDOWN_KEY

    # Union of all alert words for the users who got this message.
    possible_words = set() # type: Set[Text]
    if realm_alert_words is not None:
        for user_id, words in realm_alert_words.items():
            if user_id in message_user_ids:
                possible_words.update(set(words))

    # DO MAIN WORK HERE -- call bugdown to convert
    rendered_content = bugdown.convert(content, realm_id=realm_id, message=message,
                                       possible_words=possible_words)

    if message is not None:
        message.user_ids_with_alert_words = set()

        if realm_alert_words is not None:
            # Record which users actually had one of their alert words
            # appear in the rendered message.
            for user_id, words in realm_alert_words.items():
                if user_id in message_user_ids:
                    if set(words).intersection(message.alert_words):
                        message.user_ids_with_alert_words.add(user_id)

        message.is_me_message = Message.is_status_message(content, rendered_content)

    return rendered_content
| |
#!/usr/bin/env python
'''
Test_Dedt.py
Defines unit tests for dedt.py from src/dedt/
'''
#############
# IMPORTS #
#############
# standard python packages
import inspect, os, sqlite3, sys, unittest
from StringIO import StringIO
# ------------------------------------------------------ #
# import sibling packages HERE!!!
sys.path.append( os.path.abspath( __file__ + "/../../../../src" ) )
from dedt import dedt, dedalusParser, clockRelation, dedalusRewriter
from utils import tools
# ------------------------------------------------------ #
testPath = os.path.abspath(__file__+"/../../../../qa")
###############
# TEST DEDT #
###############
class Test_dedt( unittest.TestCase ) :
##############################
# CREATE DEDALUS IR TABLES #
##############################
def test_createDedalusIRTables_dedt( self ) :
#testing set up
testDB = testPath + "/IR.db"
IRDB = sqlite3.connect( testDB )
cursor = IRDB.cursor()
#checks if it runs through function without error
self.assertTrue(dedt.createDedalusIRTables(cursor)==None)
#checks if the tables are actually created
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='Fact'").fetchone()==None)
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='FactAtt'").fetchone()==None)
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='Rule'").fetchone()==None)
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='GoalAtt'").fetchone()==None)
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='Subgoals'").fetchone()==None)
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='SubgoalAtt'").fetchone()==None)
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='SubgoalAddArgs'").fetchone()==None)
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='Equation'").fetchone()==None)
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='Clock'").fetchone()==None)
self.assertFalse(cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='Crash'").fetchone()==None)
#clean up testing
IRDB.close()
os.remove( testDB )
################
# DEDT TO IR #
################
    def test_dedToIR_dedt( self ) :
        """dedToIR raises SystemExit (with 'ERROR' in the exit code) for a
        missing file, and returns a non-None result for a valid program."""
        #testing set up. dedToIR has dependency
        #on createDedalusIRTables so that's
        #tested first above.
        testDB = testPath + "/IR.db"
        IRDB = sqlite3.connect( testDB )
        cursor = IRDB.cursor()
        #dependency
        dedt.createDedalusIRTables(cursor)
        #throws error for nonexistent file
        inputfile = "./nonexistentfile.ded"
        with self.assertRaises(SystemExit) as cm:
            dedt.dedToIR(inputfile,cursor)
        self.assertIn("ERROR",cm.exception.code)
        #runs through function to make sure it finishes without error
        inputfile = testPath+"/testfiles/testFullProgram.ded"
        outputResult = None
        self.assertFalse(dedt.dedToIR(inputfile,cursor)==outputResult)
        #clean up testing
        IRDB.close()
        os.remove( testDB )
###################
# STARTER CLOCK #
###################
def test_starterClock_dedt( self ) :
#tested in clockRelation tests below
return None
############
# REWRITE #
############
def test_rewrite_dedt( self ) :
#tested in dedalusRewriter and
#provenanceRewriter below
return None
####################
# RUN TRANSLATOR #
####################
def test_runTranslator_dedt(self):
    """runTranslator must abort without a datalog program path and
    finish cleanly with one.

    Depends on createDedalusIRTables, which is tested separately above.
    """
    testDB = testPath + "/IR.db"
    IRDB = sqlite3.connect(testDB)
    cursor = IRDB.cursor()
    tableList = testPath + "/testfiles/tableListStr.data"
    datalogProg = testPath + "/testfiles/c4program.olg"
    # Dependency: build the IR tables before translating.
    dedt.createDedalusIRTables(cursor)
    inputfile = testPath + "/testfiles/testFullProgram.ded"
    inputArg = {'prov_diagrams': False, 'use_symmetry': False, 'crashes': 0,
                'solver': None, 'disable_dot_rendering': False,
                'negative_support': False, 'strategy': None,
                'file': testPath + "/testfiles/testFullProgram.ded", 'EOT': 3,
                'find_all_counterexamples': False,
                'nodes': ['a', 'b', 'c', 'd'], 'EFF': 2, 'evaluator': 'c4'}
    evaluator = "c4"
    # Bug fix: initialize both paths so the cleanup below cannot raise
    # NameError when runTranslator returns None (the original only bound
    # these names inside the `if outpaths` branch).
    tables = None
    c4file = None
    # Passing None for the datalog program path must abort.
    with self.assertRaises(SystemExit):
        dedt.runTranslator(cursor, inputfile, inputArg, tableList, None,
                           evaluator)
    # A valid invocation must finish without error.
    outpaths = dedt.runTranslator(cursor, inputfile, inputArg, tableList,
                                  datalogProg, evaluator)
    if outpaths is not None:
        tables = outpaths[0]
        c4file = outpaths[1]
    # Clean up the scratch database and any generated output files.
    IRDB.close()
    os.remove(testDB)
    if tables is not None:
        os.remove(tables)
    if c4file is not None:
        os.remove(c4file)
#######################
# TRANSLATE DEDALUS #
#######################
def test_translateDedalus_dedt(self):
    """translateDedalus must return None on success for a full program."""
    testDB = testPath + "/IR.db"
    IRDB = sqlite3.connect(testDB)
    cursor = IRDB.cursor()
    # NOTE(review): the error-path check (nonexistent input file raising
    # SystemExit with an ERROR message) is disabled because
    # translateDedalus currently leaves the DB file behind in that case.
    inputArg = {'prov_diagrams': False, 'use_symmetry': False, 'crashes': 0,
                'solver': None, 'disable_dot_rendering': False,
                'negative_support': False, 'strategy': None,
                'file': testPath + "/testfiles/testFullProgram.ded", 'EOT': 3,
                'find_all_counterexamples': False,
                'nodes': ['a', 'b', 'c', 'd'], 'EFF': 2, 'evaluator': 'c4'}
    tableList = testPath + "/testfiles/tableListStr.data"
    datalogProg = testPath + "/testfiles/c4program.olg"
    self.assertIsNone(
        dedt.translateDedalus(inputArg, tableList, datalogProg, cursor))
    # Bug fix: close the connection and remove the scratch DB; the
    # original test leaked both.
    IRDB.close()
    os.remove(testDB)
##############
# CLEAN UP #
##############
def test_cleanUp(self):
    """Placeholder: cleanUp has no standalone test yet."""
    return None
#########################
# THREAD OF EXECUTION #
#########################
# use this main if running this script exclusively.
if __name__ == "__main__" :
    # Run the full suite with per-test progress output.
    unittest.main( verbosity=2 )
#########
# EOF #
#########
| |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
from scipy.special import gammaln
from scipy.optimize import fmin_powell, minimize_scalar
from skbio.stats import subsample
def _validate(counts, suppress_cast=False):
"""Validate and convert input to an acceptable counts vector type.
Note: may not always return a copy of `counts`!
"""
counts = np.asarray(counts)
if not suppress_cast:
counts = counts.astype(int, casting='safe', copy=False)
if counts.ndim != 1:
raise ValueError("Only 1-D vectors are supported.")
elif (counts < 0).any():
raise ValueError("Counts vector cannot contain negative values.")
return counts
def berger_parker_d(counts):
    """Return Berger-Parker dominance.

    The fraction of the sample belonging to the most abundant OTU:
    :math:`d = N_{max} / N`.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Berger-Parker dominance.

    References
    ----------
    .. [1] Berger & Parker (1970). Implementation follows the SDR-IV
       online manual, http://www.pisces-conservation.com/sdrhelp/index.html
    """
    vec = _validate(counts)
    total = vec.sum()
    return vec.max() / total
def brillouin_d(counts):
    """Return the Brillouin index of alpha diversity.

    .. math:: HB = \\frac{\\ln N! - \\sum_i \\ln n_i!}{N}

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Brillouin index.

    References
    ----------
    .. [1] SDR-IV online manual,
       http://www.pisces-conservation.com/sdrhelp/index.html
    """
    vec = _validate(counts)
    # Zero counts contribute ln(0!) = 0, so drop them up front.
    nonzero = vec[vec.nonzero()]
    total = nonzero.sum()
    # gammaln(x + 1) == ln(x!) without overflowing for large counts.
    return (gammaln(total + 1) - gammaln(nonzero + 1).sum()) / total
def dominance(counts):
    """Return dominance, :math:`\\sum p_i^2`.

    Here :math:`p_i` is the proportion of the community that OTU
    :math:`i` represents. Equals ``1 - simpson``; ranges in [0, 1].

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Dominance.

    See Also
    --------
    simpson

    References
    ----------
    .. [1] http://folk.uio.no/ohammer/past/diversity.html
    """
    vec = _validate(counts)
    props = vec / vec.sum()
    return (props * props).sum()
def doubles(counts):
    """Return the number of OTUs observed exactly twice (doubletons).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    int
        Doubleton count.
    """
    vec = _validate(counts)
    return (vec == 2).sum()
def enspie(counts):
    """Return the ENS_pie alpha diversity measure, ``1 / dominance``.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        ENS_pie alpha diversity measure.

    See Also
    --------
    dominance

    References
    ----------
    .. [1] Chase and Knight (2013), Ecology Letters 16(s1):17-26.
    """
    vec = _validate(counts)
    return 1 / dominance(vec)
def equitability(counts, base=2):
    """Return equitability: Shannon index corrected for OTU richness.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    base : scalar, optional
        Logarithm base used throughout.

    Returns
    -------
    double
        Measure of equitability.

    See Also
    --------
    shannon

    References
    ----------
    .. [1] SDR-IV online manual,
       http://www.pisces-conservation.com/sdrhelp/index.html
    """
    vec = _validate(counts)
    entropy = shannon(vec, base)
    # Maximum possible entropy for this richness, in the same log base.
    max_entropy = np.log(observed_otus(vec)) / np.log(base)
    return entropy / max_entropy
def esty_ci(counts):
    """Return Esty's 95% confidence interval as (lower, upper).

    .. math:: F_1/N \\pm z\\sqrt{W}, \\quad
       W = \\frac{F_1(N-F_1)+2NF_2}{N^3}

    where :math:`F_1`/:math:`F_2` are singleton/doubleton counts and
    :math:`N` is the total number of individuals.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    tuple
        ``(lower_bound, upper_bound)``.

    References
    ----------
    .. [1] Esty, W. W. (1983). Ann Statist 11: 905-912.
    """
    vec = _validate(counts)
    f1 = singles(vec)
    f2 = doubles(vec)
    n = vec.sum()
    # z hardcoded for a 95% confidence interval.
    z = 1.959963985
    W = (f1 * (n - f1) + 2 * n * f2) / (n ** 3)
    center = f1 / n
    spread = z * np.sqrt(W)
    return center - spread, center + spread
def fisher_alpha(counts):
    """Return Fisher's alpha, found via scalar minimization.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Fisher's alpha.

    Raises
    ------
    RuntimeError
        If the optimizer fails to converge (residual > 1.0).

    References
    ----------
    .. [1] SDR-IV online manual,
       http://www.pisces-conservation.com/sdrhelp/index.html
    """
    vec = _validate(counts)
    n = vec.sum()
    s = observed_otus(vec)

    def squared_residual(alpha):
        # Squared error of the implicit relation S = alpha*ln(1 + N/alpha).
        return (alpha * np.log(1 + (n / alpha)) - s) ** 2

    # Silence divide/invalid RuntimeWarnings while the optimizer probes
    # invalid values such as alpha = 0.
    old_settings = np.seterr(divide='ignore', invalid='ignore')
    try:
        alpha = minimize_scalar(squared_residual).x
    finally:
        np.seterr(**old_settings)

    if squared_residual(alpha) > 1.0:
        raise RuntimeError("Optimizer failed to converge (error > 1.0), so "
                           "could not compute Fisher's alpha.")
    return alpha
def goods_coverage(counts):
    """Return Good's coverage estimator, :math:`1 - F_1/N`.

    :math:`F_1` is the singleton count and :math:`N` the total number
    of individuals.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Good's coverage estimator.
    """
    vec = _validate(counts)
    return 1 - (singles(vec) / vec.sum())
def heip_e(counts):
    """Return Heip's evenness measure.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Heip's evenness measure.

    References
    ----------
    .. [1] Heip, C. 1974. J. Mar. Biol. Ass. UK., 54, 555-557.
    """
    vec = _validate(counts)
    # Natural-log Shannon entropy, exponentiated to an effective number.
    effective = np.exp(shannon(vec, base=np.e)) - 1
    return effective / (observed_otus(vec) - 1)
def kempton_taylor_q(counts, lower_quantile=0.25, upper_quantile=0.75):
    """Return the Kempton-Taylor Q index of alpha diversity.

    Estimates the slope of the cumulative abundance curve in the
    interquantile range; by default the quartiles, rounding inwards.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    lower_quantile, upper_quantile : float, optional
        Bounds of the interquantile range.

    Returns
    -------
    double
        Kempton-Taylor Q index.

    Notes
    -----
    Differs slightly from Magurran 1998 (14 vs 15 in the numerator):
    OTUs tied at the quantile boundaries are not half-counted here.

    References
    ----------
    .. [1] Kempton & Taylor (1976). Nature, 262, 818-820.
    .. [2] http://www.pisces-conservation.com/sdrhelp/index.html
    """
    vec = _validate(counts)
    size = len(vec)
    # Round the lower index up and the upper index down ("inwards").
    lower = int(np.ceil(size * lower_quantile))
    upper = int(size * upper_quantile)
    ranked = np.sort(vec)
    return (upper - lower) / np.log(ranked[upper] / ranked[lower])
def margalef(counts):
    """Return Margalef's richness index, :math:`(S - 1)/\\ln N`.

    Assumes log accumulation. :math:`S` is the species number and
    :math:`N` the total number of individuals.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Margalef's richness index.

    References
    ----------
    .. [1] Magurran, A E 2004. Measuring biological diversity.
       Blackwell. pp. 76-77.
    """
    vec = _validate(counts)
    return (observed_otus(vec) - 1) / np.log(vec.sum())
def mcintosh_d(counts):
    """Return McIntosh dominance index D.

    .. math:: D = \\frac{N - U}{N - \\sqrt{N}}, \\quad
       U = \\sqrt{\\sum n_i^2}

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        McIntosh dominance index D.

    See Also
    --------
    mcintosh_e

    References
    ----------
    .. [1] McIntosh, R. P. 1967. Ecology 48, 1115-1126.
    .. [2] http://www.pisces-conservation.com/sdrhelp/index.html
    """
    vec = _validate(counts)
    total = vec.sum()
    u = np.sqrt((vec * vec).sum())
    return (total - u) / (total - np.sqrt(total))
def mcintosh_e(counts):
    """Return McIntosh's evenness measure E.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        McIntosh evenness measure E.

    See Also
    --------
    mcintosh_d

    Notes
    -----
    Follows Heip & Engels 1974 p 560, *not* the SDR-IV online manual,
    which is wrong.
    """
    vec = _validate(counts)
    total = vec.sum()
    richness = observed_otus(vec)
    numerator = np.sqrt((vec * vec).sum())
    denominator = np.sqrt((total - richness + 1) ** 2 + richness - 1)
    return numerator / denominator
def menhinick(counts):
    """Return Menhinick's richness index, :math:`S/\\sqrt{N}`.

    Assumes square-root accumulation.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Menhinick's richness index.

    References
    ----------
    .. [1] Magurran, A E 2004. Measuring biological diversity.
       Blackwell. pp. 76-77.
    """
    vec = _validate(counts)
    return observed_otus(vec) / np.sqrt(vec.sum())
def michaelis_menten_fit(counts, num_repeats=1, params_guess=None):
    """Fit :math:`S = nS_{max}/(n+B)` to a rarefaction curve; return S_max.

    Rarefies `counts` at every depth :math:`n = 1..N` (`num_repeats`
    draws per depth, averaging the observed OTU counts), then fits the
    Michaelis-Menten curve by nonlinear least squares.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    num_repeats : int, optional
        Number of rarefaction draws per depth.
    params_guess : tuple, optional
        Initial ``(S_max, B)``. Defaults to the observed OTU count
        (S_max should be >= S) and ``round(N / 2)``.

    Returns
    -------
    S_max : double
        Estimated Michaelis-Menten :math:`S_{max}` parameter.

    See Also
    --------
    skbio.math.subsample

    Notes
    -----
    A plain least-squares fit is used rather than the ML model of
    Raaijmakers (1987), whose error assumptions suit enzyme kinetics
    but not rarefaction data.

    References
    ----------
    .. [1] Raaijmakers, J. G. W. 1987. Biometrics 43, 793-803.
    """
    vec = _validate(counts)
    total_indiv = vec.sum()
    if params_guess is None:
        params_guess = (observed_otus(vec), int(round(total_indiv / 2)))

    # Observed number of OTUs vs number of individuals sampled: S vs n.
    depths = np.arange(1, total_indiv + 1)
    trials = np.empty((num_repeats, len(depths)), dtype=int)
    for rep in range(num_repeats):
        trials[rep] = np.asarray(
            [observed_otus(subsample(vec, depth)) for depth in depths],
            dtype=int)
    mean_curve = trials.mean(0)

    def sse(params, depth, target):
        # Sum of squared residuals of the Michaelis-Menten prediction.
        return (((params[0] * depth / (params[1] + depth)) - target) ** 2).sum()

    # First fitted parameter is S_max.
    return fmin_powell(sse, params_guess, ftol=1e-5,
                       args=(depths, mean_curve), disp=False)[0]
def observed_otus(counts):
    """Return the number of distinct OTUs (nonzero counts).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    int
        Distinct OTU count.
    """
    vec = _validate(counts)
    return (vec != 0).sum()
def osd(counts):
    """Return (observed OTUs, singletons, doubletons) as a tuple.

    Convenience wrapper used by several measures that need all three.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    osd : tuple
        Observed OTUs, singles, and doubles.

    See Also
    --------
    observed_otus
    singles
    doubles
    """
    vec = _validate(counts)
    return observed_otus(vec), singles(vec), doubles(vec)
def robbins(counts):
    """Return Robbins' estimator of the probability of unobserved outcomes.

    .. math:: \\frac{F_1}{n+1}

    where :math:`F_1` is the singleton count. As implemented, the
    estimate is for :math:`n-1` counts (the x-axis is off by 1).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Robbins' estimate.

    References
    ----------
    .. [1] Robbins, H. E (1968). Ann. of Stats. Vol 36, pp. 256-257.
    """
    vec = _validate(counts)
    return singles(vec) / vec.sum()
def shannon(counts, base=2):
    """Return Shannon entropy H of the counts, in bits by default.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    base : scalar, optional
        Logarithm base (default 2, unlike the SDR-IV manual's e).

    Returns
    -------
    double
        Shannon diversity index H.

    References
    ----------
    .. [1] http://www.pisces-conservation.com/sdrhelp/index.html
    """
    vec = _validate(counts)
    props = vec / vec.sum()
    # 0 * log(0) is defined as 0; drop zero-probability entries.
    props = props[props.nonzero()]
    return -(props * np.log(props)).sum() / np.log(base)
def simpson(counts):
    """Return Simpson's index, ``1 - dominance``.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Simpson's index.

    See Also
    --------
    dominance

    Notes
    -----
    Follows [1]_ (``1 - dominance``); some references (e.g. [2]_)
    instead define it as ``1 / dominance``.

    References
    ----------
    .. [1] http://folk.uio.no/ohammer/past/diversity.html
    .. [2] http://www.pisces-conservation.com/sdrhelp/index.html
    """
    vec = _validate(counts)
    return 1 - dominance(vec)
def simpson_e(counts):
    """Return Simpson's evenness measure E.

    .. math:: E = \\frac{1 / D}{S_{obs}}

    where :math:`D` is dominance and :math:`S_{obs}` the observed OTU
    count.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Simpson's evenness measure E.

    See Also
    --------
    dominance
    enspie
    simpson

    References
    ----------
    .. [1] http://www.tiem.utk.edu/~gross/bioed/bealsmodules/simpsonDI.html
    """
    vec = _validate(counts)
    return enspie(vec) / observed_otus(vec)
def singles(counts):
    """Return the number of OTUs observed exactly once (singletons).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    int
        Singleton count.
    """
    vec = _validate(counts)
    return (vec == 1).sum()
def strong(counts):
    """Return Strong's dominance index (Dw).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Strong's dominance index (Dw).

    References
    ----------
    .. [1] Strong, W. L., 2002. Community Ecology, 3, 237-246.
    .. [2] http://www.pisces-conservation.com/sdrhelp/index.html
    """
    vec = _validate(counts)
    total = vec.sum()
    richness = observed_otus(vec)
    ranks = np.arange(1, len(vec) + 1)
    # Cumulative abundance of OTUs ranked from most to least abundant.
    cumulative = np.sort(vec)[::-1].cumsum()
    # Maximum vertical gap between the abundance curve and the diagonal.
    return (cumulative / total - (ranks / richness)).max()
| |
# Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Functions and classes common to multiple pymongo modules."""
import collections
from bson.binary import (STANDARD, PYTHON_LEGACY,
JAVA_LEGACY, CSHARP_LEGACY)
from bson.codec_options import CodecOptions
from bson.py3compat import string_type, integer_types
from pymongo.auth import MECHANISMS
from pymongo.errors import ConfigurationError
from pymongo.read_preferences import (read_pref_mode_from_name,
_ServerMode)
from pymongo.ssl_support import validate_cert_reqs
from pymongo.write_concern import WriteConcern
# Defaults until we connect to a server and get updated limits.
MAX_BSON_SIZE = 16 * (1024 ** 2)  # 16 MiB
MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE
MIN_WIRE_VERSION = 0
MAX_WIRE_VERSION = 0
MAX_WRITE_BATCH_SIZE = 1000
# What this version of PyMongo supports.
MIN_SUPPORTED_WIRE_VERSION = 0
MAX_SUPPORTED_WIRE_VERSION = 3
# Frequency to call ismaster on servers, in seconds.
HEARTBEAT_FREQUENCY = 10
# Frequency to process kill-cursors, in seconds. See MongoClient.close_cursor.
KILL_CURSOR_FREQUENCY = 1
# How long to wait, in seconds, for a suitable server to be found before
# aborting an operation. For example, if the client attempts an insert
# during a replica set election, SERVER_SELECTION_TIMEOUT governs the
# longest it is willing to wait for a new primary to be found.
SERVER_SELECTION_TIMEOUT = 30
# Spec requires at least 500ms between ismaster calls.
MIN_HEARTBEAT_INTERVAL = 0.5
# Default connectTimeout in seconds.
CONNECT_TIMEOUT = 20.0
# Default value for maxPoolSize.
MAX_POOL_SIZE = 100
# Default value for localThresholdMS.
LOCAL_THRESHOLD_MS = 15
# mongod/s 2.6 and above return code 59 when a
# command doesn't exist. mongod versions previous
# to 2.6 and mongos 2.4.x return no error code
# when a command does exist. mongos versions previous
# to 2.4.0 return code 13390 when a command does not
# exist. None covers the "no error code" case.
COMMAND_NOT_FOUND_CODES = (59, 13390, None)
# Error codes to ignore if GridFS calls createIndex on a secondary
UNAUTHORIZED_CODES = (13, 16547, 16548)
def partition_node(node):
    """Split a host:port string into a (host, int(port)) pair.

    Defaults to port 27017 when no port is present, and strips the
    square brackets from an IPv6 literal such as "[::1]:27017".
    """
    host, sep, port_str = node.rpartition(':')
    if sep:
        port = int(port_str)
    else:
        host, port = node, 27017
    if host.startswith('['):
        host = host[1:-1]
    return host, port
def clean_node(node):
    """Split and normalize a node name from an ismaster response.

    The hostname is lowercased because DNS is case-insensitive
    (RFC 4343); this prevents useless rediscovery when the seed list
    and the ismaster response differ only in case (e.g. "foo.com" vs
    "FOO.com").
    """
    host, port = partition_node(node)
    return host.lower(), port
def raise_config_error(key, dummy):
    """Raise ConfigurationError naming the unknown option `key`."""
    message = "Unknown option %s" % (key,)
    raise ConfigurationError(message)
# Mapping of URI uuid representation options to valid subtypes.
# Keys are the case-sensitive values accepted by the uuidRepresentation
# URI option; values are bson.binary subtype constants.
_UUID_REPRESENTATIONS = {
    'standard': STANDARD,
    'pythonLegacy': PYTHON_LEGACY,
    'javaLegacy': JAVA_LEGACY,
    'csharpLegacy': CSHARP_LEGACY
}
def validate_boolean(option, value):
    """Validates that 'value' is True or False."""
    if not isinstance(value, bool):
        raise TypeError("%s must be True or False" % (option,))
    return value
def validate_boolean_or_string(option, value):
    """Validates that value is True, False, 'true', or 'false'."""
    if not isinstance(value, string_type):
        return validate_boolean(option, value)
    # String form: only the exact lowercase words are accepted.
    if value == 'true':
        return True
    if value == 'false':
        return False
    raise ValueError("The value of %s must be "
                     "'true' or 'false'" % (option,))
def validate_integer(option, value):
    """Validates that 'value' is an integer (or basestring representation).
    """
    if isinstance(value, integer_types):
        return value
    elif isinstance(value, string_type):
        try:
            # Bug fix: int() accepts signed values like "-1", which
            # str.isdigit() wrongly rejected.
            return int(value)
        except ValueError:
            raise ValueError("The value of %s must be "
                             "an integer" % (option,))
    raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
    """Validate that 'value' is a positive integer, which does not include 0.
    """
    val = validate_integer(option, value)
    if val > 0:
        return val
    raise ValueError("The value of %s must be "
                     "a positive integer" % (option,))
def validate_non_negative_integer(option, value):
    """Validate that 'value' is a positive integer or 0.
    """
    val = validate_integer(option, value)
    if val >= 0:
        return val
    raise ValueError("The value of %s must be "
                     "a non negative integer" % (option,))
def validate_readable(option, value):
    """Validates that 'value' is file-like and readable.
    """
    if value is None:
        return value
    # Coerce to string first: on Python 3.3 open(True, 'r') succeeds.
    # Used in ssl cert checking due to poor ssl module error reporting.
    value = validate_string(option, value)
    with open(value, 'r'):
        pass
    return value
def validate_positive_integer_or_none(option, value):
    """Validate that 'value' is a positive integer or None.
    """
    return value if value is None else validate_positive_integer(option, value)
def validate_non_negative_integer_or_none(option, value):
    """Validate that 'value' is a positive integer or 0 or None.
    """
    return (value if value is None
            else validate_non_negative_integer(option, value))
def validate_string(option, value):
    """Validates that 'value' is an instance of `basestring` for Python 2
    or `str` for Python 3.
    """
    if not isinstance(value, string_type):
        raise TypeError("Wrong type for %s, value must be "
                        "an instance of %s" % (option, string_type.__name__))
    return value
def validate_string_or_none(option, value):
    """Validates that 'value' is an instance of `basestring` or `None`.
    """
    return value if value is None else validate_string(option, value)
def validate_int_or_basestring(option, value):
    """Validates that 'value' is an integer or string.
    """
    if isinstance(value, integer_types):
        return value
    if isinstance(value, string_type):
        # Numeric strings are coerced; anything else passes through.
        return int(value) if value.isdigit() else value
    raise TypeError("Wrong type for %s, value must be an "
                    "integer or a string" % (option,))
def validate_positive_float(option, value):
    """Validates that 'value' is a float, or can be converted to one, and is
    positive.
    """
    errmsg = "%s must be an integer or float" % (option,)
    try:
        value = float(value)
    except ValueError:
        raise ValueError(errmsg)
    except TypeError:
        raise TypeError(errmsg)
    # Cap at one billion as a stand-in for infinity: float('inf') is
    # broken on Python 2.4/2.5 on Windows.
    if 0 < value < 1e9:
        return value
    raise ValueError("%s must be greater than 0 and "
                     "less than one billion" % (option,))
def validate_positive_float_or_zero(option, value):
    """Validates that 'value' is 0 or a positive float, or can be converted to
    0 or a positive float.
    """
    if value in (0, "0"):
        return 0
    return validate_positive_float(option, value)
def validate_timeout_or_none(option, value):
    """Validates a timeout specified in milliseconds returning
    a value in floating point seconds.
    """
    if value is None:
        return None
    # Convert milliseconds to seconds after range-checking.
    return validate_positive_float(option, value) / 1000.0
def validate_timeout_or_zero(option, value):
    """Validates a timeout specified in milliseconds returning
    a value in floating point seconds. 0 is valid here; None is a
    config error (setting the timeout to nothing in the URI string).
    """
    if value is None:
        raise ConfigurationError("%s cannot be None" % (option, ))
    if value in (0, "0"):
        return 0
    # Convert milliseconds to seconds after range-checking.
    return validate_positive_float(option, value) / 1000.0
def validate_read_preference(dummy, value):
    """Validate a read preference.
    """
    if isinstance(value, _ServerMode):
        return value
    raise TypeError("%r is not a read preference." % (value,))
def validate_read_preference_mode(dummy, name):
    """Validate read preference mode for a MongoReplicaSetClient.
    """
    try:
        return read_pref_mode_from_name(name)
    except ValueError:
        # Re-raise with a message that names the offending value.
        raise ValueError("%s is not a valid read preference" % (name,))
def validate_auth_mechanism(option, value):
    """Validate the authMechanism URI option.
    """
    # CRAM-MD5 is for server testing only: undocumented, unsupported,
    # and may be removed at any time. You have been warned.
    if value == 'CRAM-MD5' or value in MECHANISMS:
        return value
    raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS)))
def validate_uuid_representation(dummy, value):
    """Validate the uuid representation option selected in the URI.
    """
    if value not in _UUID_REPRESENTATIONS:
        raise ValueError("%s is an invalid UUID representation. "
                         "Must be one of "
                         "%s" % (value, tuple(_UUID_REPRESENTATIONS)))
    return _UUID_REPRESENTATIONS[value]
def validate_read_preference_tags(name, value):
    """Parse readPreferenceTags if passed as a client kwarg.
    """
    specs = value if isinstance(value, list) else [value]
    tag_sets = []
    for spec in specs:
        # The empty string means "match any member".
        if spec == '':
            tag_sets.append({})
            continue
        try:
            pairs = (tag.split(":") for tag in spec.split(","))
            tag_sets.append(dict(pairs))
        except Exception:
            raise ValueError("%r not a valid "
                             "value for %s" % (spec, name))
    return tag_sets
# Property names accepted by the authMechanismProperties option.
_MECHANISM_PROPS = frozenset(['SERVICE_NAME'])


def validate_auth_mechanism_properties(option, value):
    """Validate authMechanismProperties."""
    value = validate_string(option, value)
    props = {}
    for opt in value.split(','):
        try:
            key, val = opt.split(':')
        except ValueError:
            raise ValueError("auth mechanism properties must be "
                             "key:value pairs like SERVICE_NAME:"
                             "mongodb, not %s." % (opt,))
        # Bug fix: check membership outside the try block. The original
        # raised this ValueError inside the same try that caught
        # ValueError, so the specific "not a supported property" message
        # was always swallowed and replaced by the generic one above.
        if key not in _MECHANISM_PROPS:
            raise ValueError("%s is not a supported auth "
                             "mechanism property. Must be one of "
                             "%s." % (key, tuple(_MECHANISM_PROPS)))
        props[key] = val
    return props
def validate_document_class(option, value):
    """Validate the document_class option.

    The class must be a MutableMapping subclass (dict, bson.son.SON,
    ...) so decoded documents support item assignment.
    """
    if not issubclass(value, collections.MutableMapping):
        # Bug fix: the original message misspelled "subclass" as "sublass".
        raise TypeError("%s must be dict, bson.son.SON, or another "
                        "subclass of collections.MutableMapping" % (option,))
    return value
def validate_is_mapping(option, value):
    """Validate the type of method arguments that expect a document."""
    if isinstance(value, collections.Mapping):
        return
    raise TypeError("%s must be an instance of dict, bson.son.SON, or "
                    "other type that inherits from "
                    "collections.Mapping" % (option,))
def validate_is_mutable_mapping(option, value):
    """Validate the type of method arguments that expect a mutable document."""
    if isinstance(value, collections.MutableMapping):
        return
    raise TypeError("%s must be an instance of dict, bson.son.SON, or "
                    "other type that inherits from "
                    "collections.MutableMapping" % (option,))
def validate_ok_for_replace(replacement):
    """Validate a replacement document."""
    validate_is_mapping("replacement", replacement)
    # An empty replacement document {} is legal.
    if not replacement:
        return
    # A replacement must not start with an update operator.
    if next(iter(replacement)).startswith('$'):
        raise ValueError('replacement can not include $ operators')
def validate_ok_for_update(update):
    """Validate an update document.

    The document must be a non-empty mapping whose first key is an update
    operator (leading ``$``).
    """
    validate_is_mapping("update", update)
    # An empty update document ({}) is not allowed.
    if not update:
        raise ValueError('update only works with $ operators')
    first_key = next(iter(update))
    if not first_key.startswith('$'):
        raise ValueError('update only works with $ operators')
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout,
# Dispatch table mapping lowercased option names to validator functions;
# consulted by validate() below.
VALIDATORS = {
    'replicaset': validate_string_or_none,
    'w': validate_int_or_basestring,
    'wtimeout': validate_integer,
    'wtimeoutms': validate_integer,
    'fsync': validate_boolean_or_string,
    'j': validate_boolean_or_string,
    'journal': validate_boolean_or_string,
    'connecttimeoutms': validate_timeout_or_none,
    'maxpoolsize': validate_positive_integer_or_none,
    'socketkeepalive': validate_boolean_or_string,
    'sockettimeoutms': validate_timeout_or_none,
    'waitqueuetimeoutms': validate_timeout_or_none,
    'waitqueuemultiple': validate_non_negative_integer_or_none,
    'ssl': validate_boolean_or_string,
    'ssl_keyfile': validate_readable,
    'ssl_certfile': validate_readable,
    'ssl_cert_reqs': validate_cert_reqs,
    'ssl_ca_certs': validate_readable,
    'ssl_match_hostname': validate_boolean_or_string,
    'read_preference': validate_read_preference,
    'readpreference': validate_read_preference_mode,
    'readpreferencetags': validate_read_preference_tags,
    'localthresholdms': validate_positive_float_or_zero,
    'serverselectiontimeoutms': validate_timeout_or_zero,
    'authmechanism': validate_auth_mechanism,
    'authsource': validate_string,
    'authmechanismproperties': validate_auth_mechanism_properties,
    'document_class': validate_document_class,
    'tz_aware': validate_boolean_or_string,
    'uuidrepresentation': validate_uuid_representation,
    'connect': validate_boolean
}
# Lowercased option names accepted by validate_auth_option() below.
_AUTH_OPTIONS = frozenset(['authmechanismproperties'])
def validate_auth_option(option, value):
    """Validate optional authentication parameters.

    Runs the generic validator, then restricts the option name to the
    known authentication options.
    """
    lower, value = validate(option, value)
    if lower in _AUTH_OPTIONS:
        return lower, value
    raise ConfigurationError('Unknown '
                             'authentication option: %s' % (option,))
def validate(option, value):
    """Generic validation function.

    Looks up the validator for the lowercased option name (falling back to
    raise_config_error for unknown options) and returns the lowercased
    name together with the validated value.
    """
    lower = option.lower()
    validator = VALIDATORS.get(lower, raise_config_error)
    return lower, validator(option, value)
# Option names (including the j/journal and wtimeout/wtimeoutms aliases)
# that belong to the write concern.
WRITE_CONCERN_OPTIONS = frozenset([
    'w',
    'wtimeout',
    'wtimeoutms',
    'fsync',
    'j',
    'journal'
])
class BaseObject(object):
    """A base class that provides attributes and methods common
    to multiple pymongo classes.

    SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB.
    """

    def __init__(self, codec_options, read_preference, write_concern):
        """Type-check and store the three shared settings.

        :raises TypeError: if any argument is not an instance of its
            expected type (CodecOptions, _ServerMode, WriteConcern).
        """
        if not isinstance(codec_options, CodecOptions):
            raise TypeError("codec_options must be an instance of "
                            "bson.codec_options.CodecOptions")
        self.__codec_options = codec_options
        if not isinstance(read_preference, _ServerMode):
            raise TypeError("%r is not valid for read_preference. See "
                            "pymongo.read_preferences for valid "
                            "options." % (read_preference,))
        self.__read_preference = read_preference
        if not isinstance(write_concern, WriteConcern):
            raise TypeError("write_concern must be an instance of "
                            "pymongo.write_concern.WriteConcern")
        self.__write_concern = write_concern

    @property
    def codec_options(self):
        """Read only access to the :class:`~bson.codec_options.CodecOptions`
        of this instance.
        """
        return self.__codec_options

    @property
    def write_concern(self):
        """Read only access to the :class:`~pymongo.write_concern.WriteConcern`
        of this instance.

        .. versionchanged:: 3.0
          The :attr:`write_concern` attribute is now read only.
        """
        return self.__write_concern

    @property
    def read_preference(self):
        """Read only access to the read preference of this instance.

        .. versionchanged:: 3.0
          The :attr:`read_preference` attribute is now read only.
        """
        return self.__read_preference
| |
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
class PRIORITY:
    # Symbolic priority levels, ordered from lowest (-100) to highest (100).
    LOWEST = -100
    LOWER = -50
    LOW = -10
    NORMAL = 0
    HIGH = 10
    HIGHER = 50
    HIGHEST = 100

class SORT_ORDER:
    # Symbolic sort positions; LAST (100) sorts after all numbered slots.
    FIRST = 0
    SECOND = 1
    THIRD = 2
    FOURTH = 3
    FIFTH = 4
    LAST = 100
class DBMS:
    # Human-readable display names for supported database systems.
    ACCESS = "Microsoft Access"
    DB2 = "IBM DB2"
    FIREBIRD = "Firebird"
    MAXDB = "SAP MaxDB"
    MSSQL = "Microsoft SQL Server"
    MYSQL = "MySQL"
    ORACLE = "Oracle"
    PGSQL = "PostgreSQL"
    SQLITE = "SQLite"
    SYBASE = "Sybase"
    HSQLDB = "HSQLDB"
    INFORMIX = "Informix"

class DBMS_DIRECTORY_NAME:
    # Lowercase directory names corresponding to each DBMS entry above.
    ACCESS = "access"
    DB2 = "db2"
    FIREBIRD = "firebird"
    MAXDB = "maxdb"
    MSSQL = "mssqlserver"
    MYSQL = "mysql"
    ORACLE = "oracle"
    PGSQL = "postgresql"
    SQLITE = "sqlite"
    SYBASE = "sybase"
    HSQLDB = "hsqldb"
    INFORMIX = "informix"
class CUSTOM_LOGGING:
    # Numeric levels for custom logging records (below logging.INFO == 20).
    PAYLOAD = 9
    TRAFFIC_OUT = 8
    TRAFFIC_IN = 7

class OS:
    # Target operating system display names.
    LINUX = "Linux"
    WINDOWS = "Windows"

class PLACE:
    # Locations within an HTTP request where parameters can appear.
    GET = "GET"
    POST = "POST"
    URI = "URI"
    COOKIE = "Cookie"
    USER_AGENT = "User-Agent"
    REFERER = "Referer"
    HOST = "Host"
    CUSTOM_POST = "(custom) POST"
    CUSTOM_HEADER = "(custom) HEADER"

class POST_HINT:
    # Recognized POST body content formats.
    SOAP = "SOAP"
    JSON = "JSON"
    JSON_LIKE = "JSON-like"
    MULTIPART = "MULTIPART"
    XML = "XML (generic)"
    ARRAY_LIKE = "Array-like"

class HTTPMETHOD:
    # Standard HTTP request method names.
    GET = "GET"
    POST = "POST"
    HEAD = "HEAD"
    PUT = "PUT"
    DELETE = "DELETE"
    TRACE = "TRACE"
    OPTIONS = "OPTIONS"
    CONNECT = "CONNECT"
    PATCH = "PATCH"

class NULLCONNECTION:
    # Techniques for retrieving page length without the full body.
    HEAD = "HEAD"
    RANGE = "Range"
    SKIP_READ = "skip-read"

class REFLECTIVE_COUNTER:
    MISS = "MISS"
    HIT = "HIT"

class CHARSET_TYPE:
    # Character set categories used when inferring values.
    BINARY = 1
    DIGITS = 2
    HEXADECIMAL = 3
    ALPHA = 4
    ALPHANUM = 5
class HEURISTIC_TEST:
    # Possible outcomes of the heuristic test.
    CASTED = 1
    NEGATIVE = 2
    POSITIVE = 3

class HASH:
    # Case-insensitive regular expressions matching known password hash
    # formats (anchored with \A...\Z so the whole string must match).
    MYSQL = r'(?i)\A\*[0-9a-f]{40}\Z'
    MYSQL_OLD = r'(?i)\A(?![0-9]+\Z)[0-9a-f]{16}\Z'
    POSTGRES = r'(?i)\Amd5[0-9a-f]{32}\Z'
    MSSQL = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{40}\Z'
    MSSQL_OLD = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{80}\Z'
    MSSQL_NEW = r'(?i)\A0x0200[0-9a-f]{8}[0-9a-f]{128}\Z'
    ORACLE = r'(?i)\As:[0-9a-f]{60}\Z'
    ORACLE_OLD = r'(?i)\A[01-9a-f]{16}\Z'
    MD5_GENERIC = r'(?i)\A[0-9a-f]{32}\Z'
    SHA1_GENERIC = r'(?i)\A[0-9a-f]{40}\Z'
    SHA224_GENERIC = r'(?i)\A[0-9a-f]{28}\Z'
    SHA384_GENERIC = r'(?i)\A[0-9a-f]{48}\Z'
    SHA512_GENERIC = r'(?i)\A[0-9a-f]{64}\Z'
    CRYPT_GENERIC = r'(?i)\A(?!\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z)(?![0-9]+\Z)[./0-9A-Za-z]{13}\Z'
    WORDPRESS = r'(?i)\A\$P\$[./0-9A-Za-z]{31}\Z'
# Reference: http://www.zytrax.com/tech/web/mobile_ids.html
class MOBILES:
    # (display name, User-Agent header value) pairs for mobile devices.
    BLACKBERRY = ("BlackBerry 9900", "Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+")
    GALAXY = ("Samsung Galaxy S", "Mozilla/5.0 (Linux; U; Android 2.2; en-US; SGH-T959D Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1")
    HP = ("HP iPAQ 6365", "Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)")
    HTC = ("HTC Sensation", "Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30")
    IPHONE = ("Apple iPhone 4s", "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3")
    NEXUS = ("Google Nexus 7", "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19")
    NOKIA = ("Nokia N97", "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344")

class PROXY_TYPE:
    # Supported outbound proxy protocols.
    HTTP = "HTTP"
    HTTPS = "HTTPS"
    SOCKS4 = "SOCKS4"
    SOCKS5 = "SOCKS5"

class REGISTRY_OPERATION:
    # Windows registry operations.
    READ = "read"
    ADD = "add"
    DELETE = "delete"

class DUMP_FORMAT:
    # Output formats for dumped table data.
    CSV = "CSV"
    HTML = "HTML"
    SQLITE = "SQLITE"
class HTTP_HEADER:
    # Canonical HTTP header field names.
    ACCEPT = "Accept"
    ACCEPT_CHARSET = "Accept-Charset"
    ACCEPT_ENCODING = "Accept-Encoding"
    ACCEPT_LANGUAGE = "Accept-Language"
    AUTHORIZATION = "Authorization"
    CACHE_CONTROL = "Cache-Control"
    CONNECTION = "Connection"
    CONTENT_ENCODING = "Content-Encoding"
    CONTENT_LENGTH = "Content-Length"
    CONTENT_RANGE = "Content-Range"
    CONTENT_TYPE = "Content-Type"
    COOKIE = "Cookie"
    EXPIRES = "Expires"
    HOST = "Host"
    IF_MODIFIED_SINCE = "If-Modified-Since"
    LAST_MODIFIED = "Last-Modified"
    LOCATION = "Location"
    PRAGMA = "Pragma"
    PROXY_AUTHORIZATION = "Proxy-Authorization"
    PROXY_CONNECTION = "Proxy-Connection"
    RANGE = "Range"
    REFERER = "Referer"
    REFRESH = "Refresh"  # Reference: http://stackoverflow.com/a/283794
    SERVER = "Server"
    SET_COOKIE = "Set-Cookie"
    TRANSFER_ENCODING = "Transfer-Encoding"
    URI = "URI"
    USER_AGENT = "User-Agent"
    VIA = "Via"
    X_POWERED_BY = "X-Powered-By"

class EXPECTED:
    # Expected value kinds for query results.
    BOOL = "bool"
    INT = "int"

class OPTION_TYPE:
    # Data types for configuration options.
    BOOLEAN = "boolean"
    INTEGER = "integer"
    FLOAT = "float"
    STRING = "string"
class HASHDB_KEYS:
    # Key names used to persist session state in the hash database.
    DBMS = "DBMS"
    DBMS_FORK = "DBMS_FORK"
    CHECK_WAF_RESULT = "CHECK_WAF_RESULT"
    CONF_TMP_PATH = "CONF_TMP_PATH"
    KB_ABS_FILE_PATHS = "KB_ABS_FILE_PATHS"
    KB_BRUTE_COLUMNS = "KB_BRUTE_COLUMNS"
    KB_BRUTE_TABLES = "KB_BRUTE_TABLES"
    KB_CHARS = "KB_CHARS"
    KB_DYNAMIC_MARKINGS = "KB_DYNAMIC_MARKINGS"
    KB_INJECTIONS = "KB_INJECTIONS"
    KB_ERROR_CHUNK_LENGTH = "KB_ERROR_CHUNK_LENGTH"
    KB_XP_CMDSHELL_AVAILABLE = "KB_XP_CMDSHELL_AVAILABLE"
    OS = "OS"

class REDIRECTION:
    # User answers for redirection prompts.
    YES = "Y"
    NO = "N"
class PAYLOAD:
    # Numeric codes mapped to SQL injection technique descriptions.
    SQLINJECTION = {
        1: "boolean-based blind",
        2: "error-based",
        3: "inline query",
        4: "stacked queries",
        5: "AND/OR time-based blind",
        6: "UNION query",
    }

    # Numeric codes mapped to injectable parameter context descriptions.
    PARAMETER = {
        1: "Unescaped numeric",
        2: "Single quoted string",
        3: "LIKE single quoted string",
        4: "Double quoted string",
        5: "LIKE double quoted string",
    }

    # Numeric codes mapped to payload risk level descriptions.
    RISK = {
        0: "No risk",
        1: "Low risk",
        2: "Medium risk",
        3: "High risk",
    }

    # Numeric codes mapped to the SQL clause a payload targets.
    CLAUSE = {
        0: "Always",
        1: "WHERE",
        2: "GROUP BY",
        3: "ORDER BY",
        4: "LIMIT",
        5: "OFFSET",
        6: "TOP",
        7: "Table name",
        8: "Column name",
    }

    # NOTE(review): the following three classes are reconstructed as
    # nested within PAYLOAD (the rendering stripped indentation).
    class METHOD:
        # How a payload's effect is detected in the response.
        COMPARISON = "comparison"
        GREP = "grep"
        TIME = "time"
        UNION = "union"

    class TECHNIQUE:
        # Numeric identifiers matching the SQLINJECTION codes above.
        BOOLEAN = 1
        ERROR = 2
        QUERY = 3
        STACKED = 4
        TIME = 5
        UNION = 6

    class WHERE:
        ORIGINAL = 1
        NEGATIVE = 2
        REPLACE = 3
class WIZARD:
    # Enumeration task bundles for the three wizard levels.
    BASIC = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba")
    INTERMEDIATE = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getUsers", "getDbs", "getTables", "getSchema", "excludeSysDbs")
    ALL = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getHostname", "getUsers", "getPasswordHashes", "getPrivileges", "getRoles", "dumpAll")

class ADJUST_TIME_DELAY:
    # Whether the time-based delay should be adjusted.
    DISABLE = -1
    NO = 0
    YES = 1

class WEB_API:
    # Server-side web scripting technologies.
    PHP = "php"
    ASP = "asp"
    ASPX = "aspx"
    JSP = "jsp"
class CONTENT_TYPE:
    # Numeric identifiers for the kinds of content produced during a run.
    TARGET = 0
    TECHNIQUES = 1
    DBMS_FINGERPRINT = 2
    BANNER = 3
    CURRENT_USER = 4
    CURRENT_DB = 5
    HOSTNAME = 6
    IS_DBA = 7
    USERS = 8
    PASSWORDS = 9
    PRIVILEGES = 10
    ROLES = 11
    DBS = 12
    TABLES = 13
    COLUMNS = 14
    SCHEMA = 15
    COUNT = 16
    DUMP_TABLE = 17
    SEARCH = 18
    SQL_QUERY = 19
    COMMON_TABLES = 20
    COMMON_COLUMNS = 21
    FILE_READ = 22
    FILE_WRITE = 23
    OS_CMD = 24
    REG_READ = 25

# Maps enumeration function names to the CONTENT_TYPE they produce.
PART_RUN_CONTENT_TYPES = {
    "checkDbms": CONTENT_TYPE.TECHNIQUES,
    "getFingerprint": CONTENT_TYPE.DBMS_FINGERPRINT,
    "getBanner": CONTENT_TYPE.BANNER,
    "getCurrentUser": CONTENT_TYPE.CURRENT_USER,
    "getCurrentDb": CONTENT_TYPE.CURRENT_DB,
    "getHostname": CONTENT_TYPE.HOSTNAME,
    "isDba": CONTENT_TYPE.IS_DBA,
    "getUsers": CONTENT_TYPE.USERS,
    "getPasswordHashes": CONTENT_TYPE.PASSWORDS,
    "getPrivileges": CONTENT_TYPE.PRIVILEGES,
    "getRoles": CONTENT_TYPE.ROLES,
    "getDbs": CONTENT_TYPE.DBS,
    "getTables": CONTENT_TYPE.TABLES,
    "getColumns": CONTENT_TYPE.COLUMNS,
    "getSchema": CONTENT_TYPE.SCHEMA,
    "getCount": CONTENT_TYPE.COUNT,
    "dumpTable": CONTENT_TYPE.DUMP_TABLE,
    "search": CONTENT_TYPE.SEARCH,
    "sqlQuery": CONTENT_TYPE.SQL_QUERY,
    "tableExists": CONTENT_TYPE.COMMON_TABLES,
    "columnExists": CONTENT_TYPE.COMMON_COLUMNS,
    "readFile": CONTENT_TYPE.FILE_READ,
    "writeFile": CONTENT_TYPE.FILE_WRITE,
    "osCmd": CONTENT_TYPE.OS_CMD,
    "regRead": CONTENT_TYPE.REG_READ
}
class CONTENT_STATUS:
    # Progress state of produced content.
    IN_PROGRESS = 0
    COMPLETE = 1

class AUTH_TYPE:
    # Supported HTTP authentication schemes.
    BASIC = "basic"
    DIGEST = "digest"
    NTLM = "ntlm"
    PKI = "pki"

class AUTOCOMPLETE_TYPE:
    # Shell autocompletion dictionaries.
    SQL = 0
    OS = 1
    SQLMAP = 2

class NOTE:
    FALSE_POSITIVE_OR_UNEXPLOITABLE = "false positive or unexploitable"

class MKSTEMP_PREFIX:
    # Filename prefixes for temporary files created via mkstemp().
    HASHES = "sqlmaphashes-"
    CRAWLER = "sqlmapcrawler-"
    IPC = "sqlmapipc-"
    TESTING = "sqlmaptesting-"
    RESULTS = "sqlmapresults-"
    COOKIE_JAR = "sqlmapcookiejar-"
    BIG_ARRAY = "sqlmapbigarray-"

class TIMEOUT_STATE:
    # Outcome of a timed operation.
    NORMAL = 0
    EXCEPTION = 1
    TIMEOUT = 2
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from keystoneauth1.auth.identity import v2
from keystoneauth1 import exceptions
from keystoneauth1 import session
from keystoneauth1.tests.unit import utils
class V2IdentityPlugin(utils.TestCase):
    """Tests for the keystoneauth1 v2 Password and Token identity plugins."""

    TEST_ROOT_URL = 'http://127.0.0.1:5000/'
    TEST_URL = '%s%s' % (TEST_ROOT_URL, 'v2.0')
    TEST_ROOT_ADMIN_URL = 'http://127.0.0.1:35357/'
    TEST_ADMIN_URL = '%s%s' % (TEST_ROOT_ADMIN_URL, 'v2.0')
    TEST_PASS = 'password'

    # Canned service catalog embedded in the stubbed token response; the
    # nova admin/public endpoint URLs are asserted against by the
    # endpoint-filter tests below.
    TEST_SERVICE_CATALOG = [{
        "endpoints": [{
            "adminURL": "http://cdn.admin-nets.local:8774/v1.0",
            "region": "RegionOne",
            "internalURL": "http://127.0.0.1:8774/v1.0",
            "publicURL": "http://cdn.admin-nets.local:8774/v1.0/"
        }],
        "type": "nova_compat",
        "name": "nova_compat"
    }, {
        "endpoints": [{
            "adminURL": "http://nova/novapi/admin",
            "region": "RegionOne",
            "internalURL": "http://nova/novapi/internal",
            "publicURL": "http://nova/novapi/public"
        }],
        "type": "compute",
        "name": "nova"
    }, {
        "endpoints": [{
            "adminURL": "http://glance/glanceapi/admin",
            "region": "RegionOne",
            "internalURL": "http://glance/glanceapi/internal",
            "publicURL": "http://glance/glanceapi/public"
        }],
        "type": "image",
        "name": "glance"
    }, {
        "endpoints": [{
            "adminURL": TEST_ADMIN_URL,
            "region": "RegionOne",
            "internalURL": "http://127.0.0.1:5000/v2.0",
            "publicURL": "http://127.0.0.1:5000/v2.0"
        }],
        "type": "identity",
        "name": "keystone"
    }, {
        "endpoints": [{
            "adminURL": "http://swift/swiftapi/admin",
            "region": "RegionOne",
            "internalURL": "http://swift/swiftapi/internal",
            "publicURL": "http://swift/swiftapi/public"
        }],
        "type": "object-store",
        "name": "swift"
    }]

    def setUp(self):
        """Build the canned token response used by most tests."""
        super(V2IdentityPlugin, self).setUp()
        # NOTE(review): TEST_TOKEN / TEST_TENANT_ID / TEST_USER appear to
        # be fixtures supplied by utils.TestCase -- not visible here.
        self.TEST_RESPONSE_DICT = {
            "access": {
                "token": {
                    "expires": "2020-01-01T00:00:10.000123Z",
                    "id": self.TEST_TOKEN,
                    "tenant": {
                        "id": self.TEST_TENANT_ID
                    },
                },
                "user": {
                    "id": self.TEST_USER
                },
                "serviceCatalog": self.TEST_SERVICE_CATALOG,
            },
        }

    def stub_auth(self, **kwargs):
        """Stub the v2 token-issue endpoint (POST /tokens)."""
        self.stub_url('POST', ['tokens'], **kwargs)

    def test_authenticate_with_username_password(self):
        # Unscoped auth by username: body carries passwordCredentials only.
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=self.TEST_PASS)
        self.assertIsNone(a.user_id)
        s = session.Session(a)
        self.assertEqual({'X-Auth-Token': self.TEST_TOKEN},
                         s.get_auth_headers())
        req = {'auth': {'passwordCredentials': {'username': self.TEST_USER,
                                                'password': self.TEST_PASS}}}
        self.assertRequestBodyIs(json=req)
        self.assertRequestHeaderEqual('Content-Type', 'application/json')
        self.assertRequestHeaderEqual('Accept', 'application/json')
        self.assertEqual(s.auth.auth_ref.auth_token, self.TEST_TOKEN)

    def test_authenticate_with_user_id_password(self):
        # Unscoped auth by user id: body uses 'userId' instead of 'username'.
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        a = v2.Password(self.TEST_URL, user_id=self.TEST_USER,
                        password=self.TEST_PASS)
        self.assertIsNone(a.username)
        s = session.Session(a)
        self.assertEqual({'X-Auth-Token': self.TEST_TOKEN},
                         s.get_auth_headers())
        req = {'auth': {'passwordCredentials': {'userId': self.TEST_USER,
                                                'password': self.TEST_PASS}}}
        self.assertRequestBodyIs(json=req)
        self.assertRequestHeaderEqual('Content-Type', 'application/json')
        self.assertRequestHeaderEqual('Accept', 'application/json')
        self.assertEqual(s.auth.auth_ref.auth_token, self.TEST_TOKEN)

    def test_authenticate_with_username_password_scoped(self):
        # Scoped auth: a tenantId element is added alongside the credentials.
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=self.TEST_PASS, tenant_id=self.TEST_TENANT_ID)
        self.assertIsNone(a.user_id)
        s = session.Session(a)
        self.assertEqual({'X-Auth-Token': self.TEST_TOKEN},
                         s.get_auth_headers())
        req = {'auth': {'passwordCredentials': {'username': self.TEST_USER,
                                                'password': self.TEST_PASS},
                        'tenantId': self.TEST_TENANT_ID}}
        self.assertRequestBodyIs(json=req)
        self.assertEqual(s.auth.auth_ref.auth_token, self.TEST_TOKEN)

    def test_authenticate_with_user_id_password_scoped(self):
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        a = v2.Password(self.TEST_URL, user_id=self.TEST_USER,
                        password=self.TEST_PASS, tenant_id=self.TEST_TENANT_ID)
        self.assertIsNone(a.username)
        s = session.Session(a)
        self.assertEqual({'X-Auth-Token': self.TEST_TOKEN},
                         s.get_auth_headers())
        req = {'auth': {'passwordCredentials': {'userId': self.TEST_USER,
                                                'password': self.TEST_PASS},
                        'tenantId': self.TEST_TENANT_ID}}
        self.assertRequestBodyIs(json=req)
        self.assertEqual(s.auth.auth_ref.auth_token, self.TEST_TOKEN)

    def test_authenticate_with_token(self):
        # Token plugin: re-authenticates by sending the existing token.
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        a = v2.Token(self.TEST_URL, 'foo')
        s = session.Session(a)
        self.assertEqual({'X-Auth-Token': self.TEST_TOKEN},
                         s.get_auth_headers())
        req = {'auth': {'token': {'id': 'foo'}}}
        self.assertRequestBodyIs(json=req)
        self.assertRequestHeaderEqual('x-Auth-Token', 'foo')
        self.assertRequestHeaderEqual('Content-Type', 'application/json')
        self.assertRequestHeaderEqual('Accept', 'application/json')
        self.assertEqual(s.auth.auth_ref.auth_token, self.TEST_TOKEN)

    def test_with_trust_id(self):
        # A trust_id is passed through verbatim in the auth payload.
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=self.TEST_PASS, trust_id='trust')
        s = session.Session(a)
        self.assertEqual({'X-Auth-Token': self.TEST_TOKEN},
                         s.get_auth_headers())
        req = {'auth': {'passwordCredentials': {'username': self.TEST_USER,
                                                'password': self.TEST_PASS},
                        'trust_id': 'trust'}}
        self.assertRequestBodyIs(json=req)
        self.assertEqual(s.auth.auth_ref.auth_token, self.TEST_TOKEN)

    def _do_service_url_test(self, base_url, endpoint_filter):
        """Issue a GET with the given filter and assert the resolved URL."""
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        self.stub_url('GET', ['path'],
                      base_url=base_url,
                      text='SUCCESS', status_code=200)
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=self.TEST_PASS)
        s = session.Session(auth=a)
        resp = s.get('/path', endpoint_filter=endpoint_filter)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(self.requests_mock.last_request.url,
                         base_url + '/path')

    def test_service_url(self):
        endpoint_filter = {'service_type': 'compute',
                           'interface': 'admin',
                           'service_name': 'nova'}
        self._do_service_url_test('http://nova/novapi/admin', endpoint_filter)

    def test_service_url_defaults_to_public(self):
        # Without an explicit interface the public endpoint is selected.
        endpoint_filter = {'service_type': 'compute'}
        self._do_service_url_test('http://nova/novapi/public', endpoint_filter)

    def test_endpoint_filter_without_service_type_fails(self):
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=self.TEST_PASS)
        s = session.Session(auth=a)
        self.assertRaises(exceptions.EndpointNotFound, s.get, '/path',
                          endpoint_filter={'interface': 'admin'})

    def test_full_url_overrides_endpoint_filter(self):
        # An absolute URL bypasses catalog-based endpoint resolution.
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        self.stub_url('GET', [],
                      base_url='http://testurl/',
                      text='SUCCESS', status_code=200)
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=self.TEST_PASS)
        s = session.Session(auth=a)
        resp = s.get('http://testurl/',
                     endpoint_filter={'service_type': 'compute'})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.text, 'SUCCESS')

    def test_invalid_auth_response_dict(self):
        # JSON body without an 'access' section must raise InvalidResponse.
        self.stub_auth(json={'hello': 'world'})
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=self.TEST_PASS)
        s = session.Session(auth=a)
        self.assertRaises(exceptions.InvalidResponse, s.get, 'http://any',
                          authenticated=True)

    def test_invalid_auth_response_type(self):
        # Non-JSON token response must raise InvalidResponse.
        self.stub_url('POST', ['tokens'], text='testdata')
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=self.TEST_PASS)
        s = session.Session(auth=a)
        self.assertRaises(exceptions.InvalidResponse, s.get, 'http://any',
                          authenticated=True)

    def test_invalidate_response(self):
        # invalidate() discards the cached token so the next call
        # re-authenticates and fetches the second stubbed token.
        resp_data1 = copy.deepcopy(self.TEST_RESPONSE_DICT)
        resp_data2 = copy.deepcopy(self.TEST_RESPONSE_DICT)
        resp_data1['access']['token']['id'] = 'token1'
        resp_data2['access']['token']['id'] = 'token2'
        auth_responses = [{'json': resp_data1}, {'json': resp_data2}]
        self.stub_auth(response_list=auth_responses)
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=self.TEST_PASS)
        s = session.Session(auth=a)
        self.assertEqual('token1', s.get_token())
        self.assertEqual({'X-Auth-Token': 'token1'}, s.get_auth_headers())
        a.invalidate()
        self.assertEqual('token2', s.get_token())
        self.assertEqual({'X-Auth-Token': 'token2'}, s.get_auth_headers())

    def test_doesnt_log_password(self):
        # The plaintext password must never reach the captured log output.
        self.stub_auth(json=self.TEST_RESPONSE_DICT)
        password = uuid.uuid4().hex
        a = v2.Password(self.TEST_URL, username=self.TEST_USER,
                        password=password)
        s = session.Session(auth=a)
        self.assertEqual(self.TEST_TOKEN, s.get_token())
        self.assertEqual({'X-Auth-Token': self.TEST_TOKEN},
                         s.get_auth_headers())
        self.assertNotIn(password, self.logger.output)

    def test_password_with_no_user_id_or_name(self):
        # Constructing a Password plugin requires a username or a user id.
        self.assertRaises(TypeError,
                          v2.Password, self.TEST_URL, password=self.TEST_PASS)
| |
import copy
import pickle
import unittest
class DictSetTest(unittest.TestCase):
    """Tests for dict view objects (dict_keys, dict_items, dict_values)."""

    def test_constructors_not_callable(self):
        # The view types exist but cannot be instantiated directly.
        kt = type({}.keys())
        self.assertRaises(TypeError, kt, {})
        self.assertRaises(TypeError, kt)
        it = type({}.items())
        self.assertRaises(TypeError, it, {})
        self.assertRaises(TypeError, it)
        vt = type({}.values())
        self.assertRaises(TypeError, vt, {})
        self.assertRaises(TypeError, vt)

    def test_dict_keys(self):
        d = {1: 10, "a": "ABC"}
        keys = d.keys()
        self.assertEqual(len(keys), 2)
        self.assertEqual(set(keys), {1, "a"})
        # Key views compare equal to sets with the same elements.
        self.assertEqual(keys, {1, "a"})
        self.assertNotEqual(keys, {1, "a", "b"})
        self.assertNotEqual(keys, {1, "b"})
        self.assertNotEqual(keys, {1})
        self.assertNotEqual(keys, 42)
        self.assertIn(1, keys)
        self.assertIn("a", keys)
        self.assertNotIn(10, keys)
        self.assertNotIn("Z", keys)
        self.assertEqual(d.keys(), d.keys())
        # Equality of key views ignores the values.
        e = {1: 11, "a": "def"}
        self.assertEqual(d.keys(), e.keys())
        del e["a"]
        self.assertNotEqual(d.keys(), e.keys())

    def test_dict_items(self):
        d = {1: 10, "a": "ABC"}
        items = d.items()
        self.assertEqual(len(items), 2)
        self.assertEqual(set(items), {(1, 10), ("a", "ABC")})
        self.assertEqual(items, {(1, 10), ("a", "ABC")})
        self.assertNotEqual(items, {(1, 10), ("a", "ABC"), "junk"})
        self.assertNotEqual(items, {(1, 10), ("a", "def")})
        self.assertNotEqual(items, {(1, 10)})
        self.assertNotEqual(items, 42)
        self.assertIn((1, 10), items)
        self.assertIn(("a", "ABC"), items)
        self.assertNotIn((1, 11), items)
        # Membership requires an exact 2-tuple of (key, value).
        self.assertNotIn(1, items)
        self.assertNotIn((), items)
        self.assertNotIn((1,), items)
        self.assertNotIn((1, 2, 3), items)
        self.assertEqual(d.items(), d.items())
        e = d.copy()
        self.assertEqual(d.items(), e.items())
        e["a"] = "def"
        self.assertNotEqual(d.items(), e.items())

    def test_dict_mixed_keys_items(self):
        # Tuple keys can make a keys view equal an items view, not vice versa.
        d = {(1, 1): 11, (2, 2): 22}
        e = {1: 1, 2: 2}
        self.assertEqual(d.keys(), e.items())
        self.assertNotEqual(d.items(), e.keys())

    def test_dict_values(self):
        d = {1: 10, "a": "ABC"}
        values = d.values()
        self.assertEqual(set(values), {10, "ABC"})
        self.assertEqual(len(values), 2)

    def test_dict_repr(self):
        # Either element order is acceptable since dict order is not
        # guaranteed by this test.
        d = {1: 10, "a": "ABC"}
        self.assertIsInstance(repr(d), str)
        r = repr(d.items())
        self.assertIsInstance(r, str)
        self.assertTrue(r == "dict_items([('a', 'ABC'), (1, 10)])" or
                        r == "dict_items([(1, 10), ('a', 'ABC')])")
        r = repr(d.keys())
        self.assertIsInstance(r, str)
        self.assertTrue(r == "dict_keys(['a', 1])" or
                        r == "dict_keys([1, 'a'])")
        r = repr(d.values())
        self.assertIsInstance(r, str)
        self.assertTrue(r == "dict_values(['ABC', 10])" or
                        r == "dict_values([10, 'ABC'])")

    def test_keys_set_operations(self):
        # Key views support the full set algebra (&, |, ^, -, isdisjoint)
        # against other views, sets, and arbitrary iterables.
        d1 = {'a': 1, 'b': 2}
        d2 = {'b': 3, 'c': 2}
        d3 = {'d': 4, 'e': 5}
        self.assertEqual(d1.keys() & d1.keys(), {'a', 'b'})
        self.assertEqual(d1.keys() & d2.keys(), {'b'})
        self.assertEqual(d1.keys() & d3.keys(), set())
        self.assertEqual(d1.keys() & set(d1.keys()), {'a', 'b'})
        self.assertEqual(d1.keys() & set(d2.keys()), {'b'})
        self.assertEqual(d1.keys() & set(d3.keys()), set())
        self.assertEqual(d1.keys() & tuple(d1.keys()), {'a', 'b'})
        self.assertEqual(d1.keys() | d1.keys(), {'a', 'b'})
        self.assertEqual(d1.keys() | d2.keys(), {'a', 'b', 'c'})
        self.assertEqual(d1.keys() | d3.keys(), {'a', 'b', 'd', 'e'})
        self.assertEqual(d1.keys() | set(d1.keys()), {'a', 'b'})
        self.assertEqual(d1.keys() | set(d2.keys()), {'a', 'b', 'c'})
        self.assertEqual(d1.keys() | set(d3.keys()),
                         {'a', 'b', 'd', 'e'})
        self.assertEqual(d1.keys() | (1, 2), {'a', 'b', 1, 2})
        self.assertEqual(d1.keys() ^ d1.keys(), set())
        self.assertEqual(d1.keys() ^ d2.keys(), {'a', 'c'})
        self.assertEqual(d1.keys() ^ d3.keys(), {'a', 'b', 'd', 'e'})
        self.assertEqual(d1.keys() ^ set(d1.keys()), set())
        self.assertEqual(d1.keys() ^ set(d2.keys()), {'a', 'c'})
        self.assertEqual(d1.keys() ^ set(d3.keys()),
                         {'a', 'b', 'd', 'e'})
        self.assertEqual(d1.keys() ^ tuple(d2.keys()), {'a', 'c'})
        self.assertEqual(d1.keys() - d1.keys(), set())
        self.assertEqual(d1.keys() - d2.keys(), {'a'})
        self.assertEqual(d1.keys() - d3.keys(), {'a', 'b'})
        self.assertEqual(d1.keys() - set(d1.keys()), set())
        self.assertEqual(d1.keys() - set(d2.keys()), {'a'})
        self.assertEqual(d1.keys() - set(d3.keys()), {'a', 'b'})
        self.assertEqual(d1.keys() - (0, 1), {'a', 'b'})
        self.assertFalse(d1.keys().isdisjoint(d1.keys()))
        self.assertFalse(d1.keys().isdisjoint(d2.keys()))
        self.assertFalse(d1.keys().isdisjoint(list(d2.keys())))
        self.assertFalse(d1.keys().isdisjoint(set(d2.keys())))
        self.assertTrue(d1.keys().isdisjoint({'x', 'y', 'z'}))
        self.assertTrue(d1.keys().isdisjoint(['x', 'y', 'z']))
        self.assertTrue(d1.keys().isdisjoint(set(['x', 'y', 'z'])))
        self.assertTrue(d1.keys().isdisjoint(set(['x', 'y'])))
        self.assertTrue(d1.keys().isdisjoint(['x', 'y']))
        self.assertTrue(d1.keys().isdisjoint({}))
        self.assertTrue(d1.keys().isdisjoint(d3.keys()))
        # Empty-dict edge cases.
        de = {}
        self.assertTrue(de.keys().isdisjoint(set()))
        self.assertTrue(de.keys().isdisjoint([]))
        self.assertTrue(de.keys().isdisjoint(de.keys()))
        self.assertTrue(de.keys().isdisjoint([1]))

    def test_items_set_operations(self):
        # Items views also behave as sets of (key, value) pairs.
        d1 = {'a': 1, 'b': 2}
        d2 = {'a': 2, 'b': 2}
        d3 = {'d': 4, 'e': 5}
        self.assertEqual(
            d1.items() & d1.items(), {('a', 1), ('b', 2)})
        self.assertEqual(d1.items() & d2.items(), {('b', 2)})
        self.assertEqual(d1.items() & d3.items(), set())
        self.assertEqual(d1.items() & set(d1.items()),
                         {('a', 1), ('b', 2)})
        self.assertEqual(d1.items() & set(d2.items()), {('b', 2)})
        self.assertEqual(d1.items() & set(d3.items()), set())
        self.assertEqual(d1.items() | d1.items(),
                         {('a', 1), ('b', 2)})
        self.assertEqual(d1.items() | d2.items(),
                         {('a', 1), ('a', 2), ('b', 2)})
        self.assertEqual(d1.items() | d3.items(),
                         {('a', 1), ('b', 2), ('d', 4), ('e', 5)})
        self.assertEqual(d1.items() | set(d1.items()),
                         {('a', 1), ('b', 2)})
        self.assertEqual(d1.items() | set(d2.items()),
                         {('a', 1), ('a', 2), ('b', 2)})
        self.assertEqual(d1.items() | set(d3.items()),
                         {('a', 1), ('b', 2), ('d', 4), ('e', 5)})
        self.assertEqual(d1.items() ^ d1.items(), set())
        self.assertEqual(d1.items() ^ d2.items(),
                         {('a', 1), ('a', 2)})
        self.assertEqual(d1.items() ^ d3.items(),
                         {('a', 1), ('b', 2), ('d', 4), ('e', 5)})
        self.assertEqual(d1.items() - d1.items(), set())
        self.assertEqual(d1.items() - d2.items(), {('a', 1)})
        self.assertEqual(d1.items() - d3.items(), {('a', 1), ('b', 2)})
        self.assertEqual(d1.items() - set(d1.items()), set())
        self.assertEqual(d1.items() - set(d2.items()), {('a', 1)})
        self.assertEqual(d1.items() - set(d3.items()), {('a', 1), ('b', 2)})
        self.assertFalse(d1.items().isdisjoint(d1.items()))
        self.assertFalse(d1.items().isdisjoint(d2.items()))
        self.assertFalse(d1.items().isdisjoint(list(d2.items())))
        self.assertFalse(d1.items().isdisjoint(set(d2.items())))
        self.assertTrue(d1.items().isdisjoint({'x', 'y', 'z'}))
        self.assertTrue(d1.items().isdisjoint(['x', 'y', 'z']))
        self.assertTrue(d1.items().isdisjoint(set(['x', 'y', 'z'])))
        self.assertTrue(d1.items().isdisjoint(set(['x', 'y'])))
        self.assertTrue(d1.items().isdisjoint({}))
        self.assertTrue(d1.items().isdisjoint(d3.items()))
        de = {}
        self.assertTrue(de.items().isdisjoint(set()))
        self.assertTrue(de.items().isdisjoint([]))
        self.assertTrue(de.items().isdisjoint(de.items()))
        self.assertTrue(de.items().isdisjoint([1]))

    def test_recursive_repr(self):
        # A dict containing its own values view must not recurse forever.
        d = {}
        d[42] = d.values()
        self.assertRaises(RecursionError, repr, d)

    def test_copy(self):
        # Dict views do not support copy.copy().
        d = {1: 10, "a": "ABC"}
        self.assertRaises(TypeError, copy.copy, d.keys())
        self.assertRaises(TypeError, copy.copy, d.values())
        self.assertRaises(TypeError, copy.copy, d.items())

    def test_compare_error(self):
        # Exceptions raised from __eq__ during containment tests propagate.
        class Exc(Exception):
            pass

        class BadEq:
            def __hash__(self):
                return 7

            def __eq__(self, other):
                raise Exc

        k1, k2 = BadEq(), BadEq()
        v1, v2 = BadEq(), BadEq()
        d = {k1: v1}
        # Identity-based fast path succeeds without calling __eq__.
        self.assertIn(k1, d)
        self.assertIn(k1, d.keys())
        self.assertIn(v1, d.values())
        self.assertIn((k1, v1), d.items())
        self.assertRaises(Exc, d.__contains__, k2)
        self.assertRaises(Exc, d.keys().__contains__, k2)
        self.assertRaises(Exc, d.items().__contains__, (k2, v1))
        self.assertRaises(Exc, d.items().__contains__, (k1, v2))
        with self.assertRaises(Exc):
            v2 in d.values()

    def test_pickle(self):
        # Dict views are not picklable with any protocol.
        d = {1: 10, "a": "ABC"}
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertRaises((TypeError, pickle.PicklingError),
                              pickle.dumps, d.keys(), proto)
            self.assertRaises((TypeError, pickle.PicklingError),
                              pickle.dumps, d.values(), proto)
            self.assertRaises((TypeError, pickle.PicklingError),
                              pickle.dumps, d.items(), proto)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
"""
extracts features used with k-Nearest Neighbors algorithm
"""
import evfuncs
import numpy as np
# helper function that calculates syllable durations
_duration = lambda onsets, offsets: offsets - onsets
# helper function that calculates duration of silent gaps between syllables
_gapdurs = lambda onsets, offsets: onsets[1:] - offsets[:-1]
def duration(onsets, offsets, syls_to_use):
    """durations of syllables, using onsets and offsets from segmentation

    Parameters
    ----------
    onsets : 1d numpy array
        syllable onset times as determined by a segmentation algorithm
    offsets : 1d numpy array
        syllable offset times as determined by a segmentation algorithm
    syls_to_use : 1d numpy Boolean array
        property of audiofileIO.song object, as set by set_syls_to_use(labels_to_use)

    Returns
    -------
    1d numpy array
        offset minus onset for each syllable selected by syls_to_use
    """
    # offsets - onsets is the module-level _duration helper, inlined here
    all_durations = offsets - onsets
    return all_durations[syls_to_use]
def pre_duration(onsets, offsets, syls_to_use):
    """duration of preceding syllable

    Parameters
    ----------
    onsets : 1d numpy array
        syllable onset times as determined by a segmentation algorithm
    offsets : 1d numpy array
        syllable offset times as determined by a segmentation algorithm
    syls_to_use : 1d numpy Boolean array
        property of audiofileIO.song object, as set by set_syls_to_use(labels_to_use)

    Returns
    -------
    1d numpy array
        for each selected syllable, the duration of the syllable before it;
        the first element is 0 because no syllable precedes the first one
    """
    n_syls = onsets.shape[-1]
    all_durations = offsets - onsets  # inlined _duration helper
    preceding = np.zeros((n_syls,))
    preceding[1:] = all_durations[:-1]
    return preceding[syls_to_use]
def foll_duration(onsets, offsets, syls_to_use):
    """duration of following syllable

    Parameters
    ----------
    onsets : 1d numpy array
        syllable onset times as determined by a segmentation algorithm
    offsets : 1d numpy array
        syllable offset times as determined by a segmentation algorithm
    syls_to_use : 1d numpy Boolean array
        property of audiofileIO.song object, as set by set_syls_to_use(labels_to_use)

    Returns
    -------
    1d numpy array
        for each selected syllable, the duration of the syllable after it;
        the last element is 0 because no syllable follows the last one
    """
    n_syls = onsets.shape[-1]
    all_durations = offsets - onsets  # inlined _duration helper
    following = np.zeros((n_syls,))
    following[:-1] = all_durations[1:]
    return following[syls_to_use]
def pre_gapdur(onsets, offsets, syls_to_use):
    """duration of the silent gap before each selected syllable

    Parameters
    ----------
    onsets : 1d numpy array
        syllable onset times from a segmentation algorithm
    offsets : 1d numpy array
        syllable offset times from a segmentation algorithm
    syls_to_use : 1d numpy Boolean array
        mask of which syllables to keep, as set by set_syls_to_use(labels_to_use)

    Returns
    -------
    1d numpy array
        pre[syls_to_use], where pre[1:] holds the gap durations
        onsets[1:] - offsets[:-1] and pre[0] = 0 (no gap precedes the
        first syllable)
    """
    n_syls = onsets.shape[-1]
    pre = np.zeros((n_syls,))
    pre[1:] = onsets[1:] - offsets[:-1]
    return pre[syls_to_use]
def foll_gapdur(onsets, offsets, syls_to_use):
    """duration of the silent gap after each selected syllable

    Parameters
    ----------
    onsets : 1d numpy array
        syllable onset times from a segmentation algorithm
    offsets : 1d numpy array
        syllable offset times from a segmentation algorithm
    syls_to_use : 1d numpy Boolean array
        mask of which syllables to keep, as set by set_syls_to_use(labels_to_use)

    Returns
    -------
    1d numpy array
        foll[syls_to_use], where foll[:-1] holds the gap durations
        onsets[1:] - offsets[:-1] and foll[-1] = 0 (no gap follows the
        last syllable)
    """
    n_syls = onsets.shape[-1]
    foll = np.zeros((n_syls,))
    foll[:-1] = onsets[1:] - offsets[:-1]
    return foll[syls_to_use]
def _smooth_rect_amp(syllable):
    """helper that computes the smoothed rectified amplitude of a syllable

    Parameters
    ----------
    syllable : syllable object
        provides sylAudio (raw audio), sampFreq, and freqCutoffs

    Returns
    -------
    1-d numpy array
        raw audio waveform amplitude after bandpass filtering, squaring,
        and smoothing, as computed by evfuncs.smooth_data
    """
    audio = syllable.sylAudio
    rate = syllable.sampFreq
    cutoffs = syllable.freqCutoffs
    return evfuncs.smooth_data(audio, rate, cutoffs)
def mn_amp_smooth_rect(syllable):
    """mean of the smoothed rectified amplitude

    Computed **from the raw audio waveform**, not the spectrogram.

    Parameters
    ----------
    syllable : syllable object

    Returns
    -------
    scalar
        mean of _smooth_rect_amp(syllable)
    """
    smoothed = _smooth_rect_amp(syllable)
    return np.mean(smoothed)
def mn_amp_rms(syllable):
    """root-mean-square amplitude

    Square root of the mean smoothed rectified amplitude.

    Parameters
    ----------
    syllable : syllable object

    Returns
    -------
    scalar
        sqrt of the value returned by mn_amp_smooth_rect
    """
    mean_amp = mn_amp_smooth_rect(syllable)
    return np.sqrt(mean_amp)
def _spect_entropy(syllable):
"""helper function that calculates spectral entropy for syllable spectrogram
Parameters
----------
syllable : syllable object
Returns
-------
spectral_entropy : 1-d numpy array
spectral entropy for each time bin in syllable spectrogram
array will have length = number of columns in syllable.spect
"""
psd = np.power(np.abs(syllable.spect), 2)
psd_pdf = psd / np.sum(psd, axis=0)
return -np.sum(psd_pdf * np.log(psd_pdf), axis=0)
def mean_spect_entropy(syllable):
    """mean spectral entropy across all time bins of a syllable

    Parameters
    ----------
    syllable : syllable object

    Returns
    -------
    scalar
        mean of _spect_entropy(syllable)
    """
    entropy_per_bin = _spect_entropy(syllable)
    return np.mean(entropy_per_bin)
def _hi_lo_ratio(syllable, middle=5000):
"""helper function to calculate hi/lo ratio
hi/lo ratio is ratio of sum of power in "high" frequencies
and sum of power in "low" frequencies,
where "hi" frequencies are those above "middle"
and "low" frequencies are below "middle"
Parameters
----------
syllable : syllable object
middle : int
defaults to 5000
Returns
-------
hi_lo_ratio : 1-d array
hi/lo ratio for each time bin in syllable spectrogram
array will have length = number of columns in syllable.spect
"""
psd = np.power(np.abs(syllable.spect), 2)
hi_ids = syllable.freqBins > middle
lo_ids = syllable.freqBins < middle
return np.log10(np.sum(psd[hi_ids, :], axis=0) / np.sum(psd[lo_ids, :], axis=0))
def mean_hi_lo_ratio(syllable):
    """mean hi/lo power ratio across all time bins of a syllable

    Parameters
    ----------
    syllable : syllable object

    Returns
    -------
    scalar
        mean of _hi_lo_ratio(syllable)
    """
    ratios = _hi_lo_ratio(syllable)
    return np.mean(ratios)
def _delta_inds(syllable, delta_times):
"""helper function that converts times from percent of duration
to seconds, then finds indices of time bins in sylllable
spectrogram closest to those times
Parameters
----------
syllable : syllable object
delta_times : list
two-element list, early time and later time
given in percent total duration of syllable
Returns
-------
inds : list
two-element list of indices
Return values are used with _delta lambda function
"""
dur = syllable.sylAudio.shape[-1] / syllable.sampFreq
t_early = dur * delta_times[0]
t_late = dur * delta_times[1]
return [
np.argmin(np.abs(syllable.timeBins - t_early)),
np.argmin(np.abs(syllable.timeBins - t_late)),
]
_delta = lambda vec, inds: vec[inds[0]] - vec[inds[1]]
def delta_amp_smooth_rect(syllable, delta_times=None):
    """change in smoothed rectified amplitude between two time points

    Parameters
    ----------
    syllable : syllable object
    delta_times : list, optional
        two-element list, early time and later time, given as fractions
        of total syllable duration; defaults to [0.2, 0.8].
        (The default is created per call to avoid a shared mutable
        default argument.)

    Returns
    -------
    scalar
        _delta of _smooth_rect_amp(syllable) at the two bin indices
    """
    if delta_times is None:
        delta_times = [0.2, 0.8]
    inds = _delta_inds(syllable, delta_times)
    amp = _smooth_rect_amp(syllable)
    return _delta(amp, inds)
def delta_entropy(syllable, delta_times=None):
    """change in spectral entropy between two time points

    Parameters
    ----------
    syllable : syllable object
    delta_times : list, optional
        two-element list, early time and later time, given as fractions
        of total syllable duration; defaults to [0.2, 0.8].
        (The default is created per call to avoid a shared mutable
        default argument.)

    Returns
    -------
    scalar
        _delta of _spect_entropy(syllable) at the two bin indices
    """
    if delta_times is None:
        delta_times = [0.2, 0.8]
    inds = _delta_inds(syllable, delta_times)
    entropy = _spect_entropy(syllable)
    return _delta(entropy, inds)
def delta_hi_lo_ratio(syllable, delta_times=None):
    """change in hi/lo power ratio between two time points

    Parameters
    ----------
    syllable : syllable object
    delta_times : list, optional
        two-element list, early time and later time, given as fractions
        of total syllable duration; defaults to [0.2, 0.8].
        (The default is created per call to avoid a shared mutable
        default argument.)

    Returns
    -------
    scalar
        _delta of _hi_lo_ratio(syllable) at the two bin indices
    """
    if delta_times is None:
        delta_times = [0.2, 0.8]
    inds = _delta_inds(syllable, delta_times)
    hi_lo = _hi_lo_ratio(syllable)
    return _delta(hi_lo, inds)
| |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
Utilities and platform-specific fixes
The portability fixes try to provide a consistent behavior of the Waf API
through Python versions 2.3 to 3.X and across different platforms (win32, linux, etc)
"""
import os, sys, errno, traceback, inspect, re, shutil, datetime, gc
import subprocess # <- leave this!
# --- portability shims: provide a uniform API across python 2.3 .. 3.x ---
try:
	from collections import deque
except ImportError:
	class deque(list):
		"""A deque for Python 2.3 which does not have one"""
		def popleft(self):
			# O(n) fallback; acceptable for the small queues used here
			return self.pop(0)
# the windows registry module was renamed _winreg -> winreg in python 3;
# winreg stays None on platforms where neither import works
try:
	import _winreg as winreg
except ImportError:
	try:
		import winreg
	except ImportError:
		winreg = None
from waflib import Errors
try:
	from collections import UserDict
except ImportError:
	from UserDict import UserDict
try:
	from hashlib import md5
except ImportError:
	try:
		from md5 import md5
	except ImportError:
		# never fail to enable fixes from another module
		pass
try:
	import threading
except ImportError:
	class threading(object):
		"""
		A fake threading class for platforms lacking the threading module.
		Use ``waf -j1`` on those platforms
		"""
		pass
	class Lock(object):
		"""Fake Lock class"""
		def acquire(self):
			pass
		def release(self):
			pass
	threading.Lock = threading.Thread = Lock
else:
	# wrap Thread.run so that uncaught exceptions in worker threads reach
	# sys.excepthook instead of being swallowed silently
	run_old = threading.Thread.run
	def run(*args, **kwargs):
		try:
			run_old(*args, **kwargs)
		except (KeyboardInterrupt, SystemExit):
			raise
		except Exception:
			sys.excepthook(*sys.exc_info())
	threading.Thread.run = run
SIG_NIL = 'iluvcuteoverload'.encode()
"""Arbitrary null value for a md5 hash. This value must be changed when the hash value is replaced (size)"""
O644 = 420
"""Constant representing the permissions for regular files (0644 raises a syntax error on python 3)"""
O755 = 493
"""Constant representing the permissions for executable files (0755 raises a syntax error on python 3)"""
rot_chr = ['\\', '|', '/', '-']
"List of characters to use when displaying the throbber (progress bar)"
rot_idx = 0
"Index of the current throbber character (progress bar)"
try:
	from collections import defaultdict
except ImportError:
	class defaultdict(dict):
		"""
		defaultdict was introduced in python 2.5, so we leave it for python 2.4 and 2.3
		"""
		def __init__(self, default_factory):
			super(defaultdict, self).__init__()
			self.default_factory = default_factory
		def __getitem__(self, key):
			try:
				return super(defaultdict, self).__getitem__(key)
			except KeyError:
				# missing key: create, store and return a default value
				value = self.default_factory()
				self[key] = value
				return value
is_win32 = sys.platform in ('win32', 'cli')
# we should have put this in the Logs.py file instead :-/
# '\x1b[K' clears to end of line; dropped when NOCOLOR is set on win32
indicator = '\x1b[K%s%s%s\r'
if is_win32 and 'NOCOLOR' in os.environ:
	indicator = '%s%s%s\r'
def readf(fname, m='r', encoding='ISO8859-1'):
	"""
	Read an entire file into a string; prefer this to raw os.open().
	In practice the wrapper node.read(..) should be preferred::

		def build(ctx):
			from waflib import Utils
			txt = Utils.readf(self.path.find_node('wscript').abspath())
			txt = ctx.path.find_node('wscript').read()

	:type fname: string
	:param fname: Path to file
	:type m: string
	:param m: Open mode
	:type encoding: string
	:param encoding: encoding value, only used for python 3
	:rtype: string
	:return: Content of the file
	"""
	if sys.hexversion > 0x3000000 and not 'b' in m:
		# python 3: read bytes, then decode with the requested encoding,
		# to behave consistently with python 2
		m += 'b'
		fileobj = open(fname, m)
		try:
			contents = fileobj.read()
		finally:
			fileobj.close()
		contents = contents.decode(encoding)
	else:
		fileobj = open(fname, m)
		try:
			contents = fileobj.read()
		finally:
			fileobj.close()
	return contents
def writef(fname, data, m='w', encoding='ISO8859-1'):
	"""
	Write an entire file from a string; prefer this to raw os.open().
	In practice the wrapper node.write(..) should be preferred::

		def build(ctx):
			from waflib import Utils
			txt = Utils.writef(self.path.make_node('i_like_kittens').abspath(), 'some data')
			self.path.make_node('i_like_kittens').write('some data')

	:type fname: string
	:param fname: Path to file
	:type data: string
	:param data: The contents to write to the file
	:type m: string
	:param m: Open mode
	:type encoding: string
	:param encoding: encoding value, only used for python 3
	"""
	if sys.hexversion > 0x3000000 and not 'b' in m:
		# python 3: encode explicitly and write in binary mode so the
		# encoding parameter is honoured
		data = data.encode(encoding)
		m += 'b'
	fileobj = open(fname, m)
	try:
		fileobj.write(data)
	finally:
		fileobj.close()
def h_file(fname):
	"""
	Compute the md5 hash of a file's contents, reading in 200kB chunks.
	This function may be monkey-patched with a faster version (for example
	one hashing only the size and timestamp from os.stat).

	:type fname: string
	:param fname: path to the file to hash
	:return: md5 digest (bytes) of the file contents
	"""
	fileobj = open(fname, 'rb')
	digest = md5()
	try:
		chunk = fileobj.read(200000)
		while chunk:
			digest.update(chunk)
			chunk = fileobj.read(200000)
	finally:
		fileobj.close()
	return digest.digest()
if hasattr(os, 'O_NOINHERIT'):
	# Windows only: reopen files with O_NOINHERIT so that file handles are
	# not inherited by child processes (inherited handles keep files locked)
	def readf_win32(f, m='r', encoding='ISO8859-1'):
		"""Windows replacement for readf, using non-inheritable handles"""
		flags = os.O_NOINHERIT | os.O_RDONLY
		if 'b' in m:
			flags |= os.O_BINARY
		if '+' in m:
			flags |= os.O_RDWR
		try:
			fd = os.open(f, flags)
		except OSError:
			raise IOError('Cannot read from %r' % f)
		if sys.hexversion > 0x3000000 and not 'b' in m:
			# python 3: read bytes and decode with the requested encoding
			m += 'b'
			f = os.fdopen(fd, m)
			try:
				txt = f.read()
			finally:
				f.close()
			txt = txt.decode(encoding)
		else:
			f = os.fdopen(fd, m)
			try:
				txt = f.read()
			finally:
				f.close()
		return txt
	def writef_win32(f, data, m='w', encoding='ISO8859-1'):
		"""Windows replacement for writef, using non-inheritable handles"""
		if sys.hexversion > 0x3000000 and not 'b' in m:
			data = data.encode(encoding)
			m += 'b'
		flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT
		if 'b' in m:
			flags |= os.O_BINARY
		if '+' in m:
			flags |= os.O_RDWR
		try:
			fd = os.open(f, flags)
		except OSError:
			raise IOError('Cannot write to %r' % f)
		f = os.fdopen(fd, m)
		try:
			f.write(data)
		finally:
			f.close()
	def h_file_win32(fname):
		"""Windows replacement for h_file, using non-inheritable handles"""
		try:
			fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT)
		except OSError:
			raise IOError('Cannot read from %r' % fname)
		f = os.fdopen(fd, 'rb')
		m = md5()
		try:
			# read in 200kB chunks; fname is reused as the chunk buffer
			while fname:
				fname = f.read(200000)
				m.update(fname)
		finally:
			f.close()
		return m.digest()
	# replace the default functions
	readf_old = readf
	writef_old = writef
	h_file_old = h_file
	readf = readf_win32
	writef = writef_win32
	h_file = h_file_win32
# the 'hex' string codec exists on python 2 only; probe for it and pick
# the matching to_hex implementation
try:
	x = ''.encode('hex')
except LookupError:
	import binascii
	def to_hex(s):
		ret = binascii.hexlify(s)
		if not isinstance(ret, str):
			# python 3: hexlify returns bytes, convert back to str
			ret = ret.decode('utf-8')
		return ret
else:
	def to_hex(s):
		return s.encode('hex')
to_hex.__doc__ = """
Return the hexadecimal representation of a string
:param s: string to convert
:type s: string
"""
# portable directory listing; on win32 an empty argument lists drive letters
listdir = os.listdir
if is_win32:
	def listdir_win32(s):
		"""
		List the contents of a folder in a portable manner.
		On Win32, return the list of drive letters: ['C:', 'X:', 'Z:']
		:type s: string
		:param s: a string, which can be empty on Windows
		"""
		if not s:
			try:
				import ctypes
			except ImportError:
				# there is nothing much we can do
				return [x + ':\\' for x in list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')]
			else:
				dlen = 4 # length of "?:\\x00"
				maxdrives = 26
				buf = ctypes.create_string_buffer(maxdrives * dlen)
				ndrives = ctypes.windll.kernel32.GetLogicalDriveStringsA(maxdrives*dlen, ctypes.byref(buf))
				return [ str(buf.raw[4*i:4*i+2].decode('ascii')) for i in range(int(ndrives/dlen)) ]
		if len(s) == 2 and s[1] == ":":
			# a bare drive letter ('c:') needs a trailing separator
			s += os.sep
		if not os.path.isdir(s):
			e = OSError('%s is not a directory' % s)
			e.errno = errno.ENOENT
			raise e
		return os.listdir(s)
	listdir = listdir_win32
def num2ver(ver):
	"""
	Convert a string or tuple version into a single integer, packing up to
	four components into one byte each::

		from waflib.Utils import num2ver
		num2ver('1.3.2') == num2ver((1,3,2)) == num2ver((1,3,2,0))

	:type ver: string or tuple of numbers
	:param ver: a version number
	"""
	if isinstance(ver, str):
		ver = tuple(ver.split('.'))
	if not isinstance(ver, tuple):
		# anything else (already an int, for example) passes through
		return ver
	total = 0
	for idx, part in enumerate(ver[:4]):
		total += 256 ** (3 - idx) * int(part)
	return total
def ex_stack():
	"""
	Extract the current exception's traceback for display.

	:return: a string representing the last exception
	"""
	return ''.join(traceback.format_exception(*sys.exc_info()))
def to_list(sth):
	"""
	Convert a space-separated string into a list; pass any other argument
	through unchanged::

		from waflib.Utils import to_list
		lst = to_list("a b c d")

	:param sth: List or a string of items separated by spaces
	:rtype: list
	:return: Argument converted to list
	"""
	if isinstance(sth, str):
		return sth.split()
	return sth
re_nl = re.compile('\r*\n', re.M)
def str_to_dict(txt):
	"""
	Parse a string of "key = value" lines into a dictionary; blank lines
	and '#' comment lines are skipped, and only the first '=' splits::

		from waflib import Utils
		x = Utils.str_to_dict('a = 1')

	:type txt: string
	:param txt: String to parse
	:rtype: dict
	:return: Dictionary containing parsed key-value pairs
	"""
	parsed = {}
	for raw_line in re_nl.split(txt):
		stripped = raw_line.strip()
		if not stripped or stripped.startswith('#') or '=' not in stripped:
			continue
		key, _, value = stripped.partition('=')
		parsed[key.strip()] = value.strip()
	return parsed
def split_path(path):
	"""Default (posix) implementation: split on '/'"""
	return path.split('/')
def split_path_cygwin(path):
	"""Cygwin implementation: preserve a leading '//' network prefix"""
	if path.startswith('//'):
		ret = path.split('/')[2:]
		ret[0] = '/' + ret[0]
		return ret
	return path.split('/')
re_sp = re.compile('[/\\\\]')
def split_path_win32(path):
	"""Win32 implementation: split on both separators, preserve UNC prefix"""
	if path.startswith('\\\\'):
		ret = re.split(re_sp, path)[2:]
		ret[0] = '\\' + ret[0]
		return ret
	return re.split(re_sp, path)
# pick the implementation matching the current platform
if sys.platform == 'cygwin':
	split_path = split_path_cygwin
elif is_win32:
	split_path = split_path_win32
split_path.__doc__ = """
Split a path by / or \\. This function is not like os.path.split
:type path: string
:param path: path to split
:return: list of strings
"""
def check_dir(path):
	"""
	Ensure that a directory exists (similar to ``mkdir -p``).

	:type path: string
	:param path: Path to directory
	"""
	if os.path.isdir(path):
		return
	try:
		os.makedirs(path)
	except OSError as e:
		# another process may have created it meanwhile; only fail if the
		# directory still does not exist
		if not os.path.isdir(path):
			raise Errors.WafError('Cannot create the folder %r' % path, ex=e)
def def_attrs(cls, **kw):
	"""
	Set default attributes on a class or instance; existing attributes are
	left untouched.

	:type cls: class
	:param cls: the class to update the given attributes in.
	:type kw: dict
	:param kw: dictionary of attributes names and values.
	"""
	for name, value in kw.items():
		if not hasattr(cls, name):
			setattr(cls, name, value)
def quote_define_name(s):
	"""
	Convert a string to an uppercase identifier suitable for C defines:
	every non-alphanumeric character becomes an underscore.

	:type s: string
	:param s: String to convert
	:rtype: string
	:return: Identifier suitable for C defines
	"""
	sanitized = re.sub("[^a-zA-Z0-9]", "_", s)
	return sanitized.upper()
def h_list(lst):
	"""
	Hash a list by hashing its string representation. For tuples, using
	hash(tup) is much more efficient.

	:param lst: list to hash
	:type lst: list of strings
	:return: md5 digest of the list representation
	"""
	return md5(str(lst).encode()).digest()
def h_fun(fun):
	"""
	Return a hashable signature for a function: its source code when
	available, "nocode" otherwise. The result is cached on the function
	object itself (attribute ``code``) when possible.

	:param fun: function to hash
	:type fun: function
	:return: hash of the function
	"""
	try:
		# cached from a previous call
		return fun.code
	except AttributeError:
		pass
	try:
		src = inspect.getsource(fun)
	except IOError:
		src = "nocode"
	try:
		fun.code = src
	except AttributeError:
		# some callables do not accept new attributes
		pass
	return src
reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
def subst_vars(expr, params):
	"""
	Replace ${VAR} with the value of VAR taken from a dict or a config
	set; '\\\\' and '$$' are unescaped to '\\' and '$'::

		from waflib import Utils
		s = Utils.subst_vars('${PREFIX}/bin', env)

	:type expr: string
	:param expr: String to perform substitution on
	:param params: Dictionary or config set to look up variable values.
	"""
	def substitute(match):
		if match.group(1):
			return '\\'
		if match.group(2):
			return '$'
		name = match.group(3)
		try:
			# ConfigSet instances may contain lists
			return params.get_flat(name)
		except AttributeError:
			return params[name]
	return reg_subst.sub(substitute, expr)
def destos_to_binfmt(key):
	"""
	Return the binary format based on the unversioned platform name.

	:param key: platform name
	:type key: string
	:return: string representing the binary format
	"""
	if key == 'darwin':
		return 'mac-o'
	if key in ('win32', 'cygwin', 'uwin', 'msys'):
		return 'pe'
	return 'elf'
def unversioned_sys_platform():
	"""
	Return the unversioned platform name.
	Some Python platform names contain versions, that depend on
	the build environment, e.g. linux2, freebsd6, etc.
	This returns the name without the version number. Exceptions are
	os2 and win32, which are returned verbatim.

	:rtype: string
	:return: Unversioned platform name
	"""
	s = sys.platform
	if s == 'java':
		# The real OS is hidden under the JVM.
		from java.lang import System
		s = System.getProperty('os.name')
		# see http://lopica.sourceforge.net/os.html for a list of possible values
		if s == 'Mac OS X':
			return 'darwin'
		elif s.startswith('Windows '):
			return 'win32'
		elif s == 'OS/2':
			return 'os2'
		elif s == 'HP-UX':
			return 'hpux'
		elif s in ('SunOS', 'Solaris'):
			return 'sunos'
		else: s = s.lower()
	# powerpc == darwin for our purposes
	if s == 'powerpc':
		return 'darwin'
	if s == 'win32' or s.endswith('os2') and s != 'sunos2': return s
	# strip the trailing version digits; raw string fixes the invalid
	# '\d' escape that warns on python >= 3.6
	return re.split(r'\d+$', s)[0]
def nada(*k, **kw):
	"""
	Accept any arguments and do nothing (placeholder callback).

	:return: None
	"""
	return None
class Timer(object):
	"""
	Simple object for timing the execution of commands.
	Its string representation is the elapsed time since creation::

		from waflib.Utils import Timer
		timer = Timer()
		a_few_operations()
		s = str(timer)
	"""
	def __init__(self):
		# record the creation time; __str__ reports elapsed time since then
		self.start_time = datetime.datetime.utcnow()
	def __str__(self):
		elapsed = datetime.datetime.utcnow() - self.start_time
		days = int(elapsed.days)
		hours = elapsed.seconds // 3600
		minutes = (elapsed.seconds - hours * 3600) // 60
		seconds = elapsed.seconds - hours * 3600 - minutes * 60 + float(elapsed.microseconds) / 1000 / 1000
		# larger units are only shown when non-zero (or when an even
		# larger unit is shown), e.g. '2h0m1.000s'
		result = ''
		if days:
			result += '%dd' % days
		if days or hours:
			result += '%dh' % hours
		if days or hours or minutes:
			result += '%dm' % minutes
		return '%s%.3fs' % (result, seconds)
if is_win32:
	# hook a fixed copy2 into shutil: the stock one does not copy file
	# attributes on windows
	old = shutil.copy2
	def copy2(src, dst):
		"""
		shutil.copy2 does not copy the file attributes on windows, so we
		hack into the shutil module to fix the problem
		"""
		old(src, dst)
		shutil.copystat(src, dst)
	setattr(shutil, 'copy2', copy2)
if os.name == 'java':
	# Jython cannot disable the gc but they can enable it ... wtf?
	try:
		gc.disable()
		gc.enable()
	except NotImplementedError:
		# make gc.disable a harmless no-op so later calls do not crash
		gc.disable = gc.enable
def read_la_file(path):
	"""
	Read a libtool-style property file (key='value' lines), used by msvc.py.
	Lines that do not match the pattern are silently skipped.

	:param path: file to read
	:type path: string
	"""
	line_re = re.compile(r'^([^=]+)=\'(.*)\'$')
	result = {}
	for line in readf(path).splitlines():
		try:
			_, key, value, _ = line_re.split(line.strip())
		except ValueError:
			# line does not match key='value'
			continue
		result[key] = value
	return result
def nogc(fun):
	"""
	Decorator: disable the garbage collector while the wrapped function
	runs (used when storing/loading the pickled build cache file).

	:param fun: function to execute
	:type fun: function
	:return: the return value of the function executed
	"""
	def wrapper(*args, **kwargs):
		try:
			gc.disable()
			result = fun(*args, **kwargs)
		finally:
			# always re-enable, even if fun raised
			gc.enable()
		return result
	wrapper.__doc__ = fun.__doc__
	return wrapper
def run_once(fun):
	"""
	Decorator: cache the result of a one-argument function per key::

		@run_once
		def foo(k):
			return 345*2343

	:param fun: function to execute
	:type fun: function
	:return: the return value of the function executed
	"""
	memo = {}
	def wrap(k):
		if k in memo:
			return memo[k]
		result = fun(k)
		memo[k] = result
		return result
	# expose the cache for inspection/clearing
	wrap.__cache__ = memo
	return wrap
def get_registry_app_path(key, filename):
	"""
	Look up the path of an executable under the windows registry
	"App Paths" key; return None when winreg is unavailable, the value
	is missing, or the file does not exist.
	NOTE(review): only filename[0] is used to build the value name --
	presumably *filename* is a list of candidate names; confirm callers.
	"""
	if not winreg:
		return None
	try:
		result = winreg.QueryValue(key, "Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\%s.exe" % filename[0])
	except WindowsError:
		pass
	else:
		if os.path.isfile(result):
			return result
| |
import io
import struct
class Error(Exception):
    """Base exception for RIFF parsing failures raised by this module."""
    pass
class ChunkHeader:
    """The 8-byte header of a RIFF chunk: 4 ascii id chars + uint32 size."""

    # Little-endian: 4 raw id bytes followed by an unsigned 32-bit size.
    HEADER_STRUCT = struct.Struct('<4sI')

    def __init__(self, id, size):
        self._id = id
        self._size = size

    def __repr__(self):
        # Added for consistency with Chunk.__repr__ / ChunkData.__repr__.
        return "riff.ChunkHeader(id='{}', size={})".format(self.id, self.size)

    @classmethod
    def readfrom(cls, iostream):
        """Read and decode a chunk header from *iostream*.

        Raises Error when the stream is truncated or the id is not
        ascii-decodable.
        """
        buffer = iostream.read(cls.HEADER_STRUCT.size)
        if len(buffer) < cls.HEADER_STRUCT.size:
            raise Error('chunk header truncated')
        idbytes, size = cls.HEADER_STRUCT.unpack(buffer)
        try:
            id = idbytes.decode('ascii')
        except UnicodeDecodeError as error:
            raise Error('chunk id not ascii-decodable') from error
        return cls(id, size)

    @property
    def id(self):
        # 4-character ascii chunk identifier, e.g. 'RIFF' or 'fmt '.
        return self._id

    @property
    def size(self):
        # Declared size of the chunk data section, in bytes.
        return self._size

    def writeto(self, iostream):
        """Encode this header and write the 8 bytes to *iostream*."""
        idbytes = self.id.encode('ascii')
        buffer = self.HEADER_STRUCT.pack(idbytes, self.size)
        iostream.write(buffer)
class ChunkData:
    """A bounded, file-like view onto the data section of a chunk.

    Reads are delegated to the underlying stream but clamped to the
    [startpos, startpos + size) window; seek/tell track a local position.
    """

    def __init__(self, iostream, size, startpos):
        self._iostream = iostream
        self._size = size
        self._startpos = startpos
        self._position = 0

    @classmethod
    def streamfrom(cls, iostream, size):
        """Capture the current stream position, then skip past the data."""
        datastart = iostream.seek(0, io.SEEK_CUR)
        iostream.seek(size, io.SEEK_CUR)
        return cls(iostream, size, datastart)

    def __repr__(self):
        return 'riff.ChunkData(size={0})'.format(self.size)

    def read(self, size=None):
        """Read up to *size* bytes (all remaining when None or negative).

        Raises Error if the underlying stream ends before the declared
        chunk size.
        """
        self._iostream.seek(self._startpos + self.tell(), io.SEEK_SET)
        remaining = self.size - self.tell()
        if size is None or size < 0:
            size = remaining
        else:
            size = min(size, remaining)
        buffer = self._iostream.read(size)
        self._position += len(buffer)
        if len(buffer) < size:
            raise Error('truncated at position {}'.format(self.tell()))
        return buffer

    def seek(self, offset, whence=io.SEEK_SET):
        """Move the local position; clamps into [0, size] like a file."""
        if whence == io.SEEK_SET:
            target = offset
        elif whence == io.SEEK_CUR:
            target = self._position + offset
        elif whence == io.SEEK_END:
            target = self.size + offset
        else:
            raise ValueError('invalid whence value')
        self._position = min(max(target, 0), self.size)
        return self._position

    @property
    def size(self):
        return self._size

    def tell(self):
        return self.seek(0, io.SEEK_CUR)
class Chunk:
    """A generic RIFF chunk: header (id + size), data section, optional pad."""

    DEFAULT_PAD_BYTE = b'\x00'
    PAD_SIZE = 1

    def __init__(self, header, data, padbyte):
        self._header = header
        self._data = data
        self._padbyte = padbyte

    @classmethod
    def create(cls, id, size, datastream):
        """Build a chunk around *size* bytes taken from *datastream*."""
        header = ChunkHeader(id, size)
        data = ChunkData.streamfrom(datastream, size)
        # odd-sized chunks carry one pad byte after the data
        if size % 2 != 0:
            padbyte = cls.DEFAULT_PAD_BYTE
        else:
            padbyte = b''
        return cls(header, data, padbyte)

    @classmethod
    def _readfrom(cls, iostream, stream):
        """Shared reader; when *stream* is False the data is buffered in
        memory, otherwise it is read lazily from *iostream*."""
        header = ChunkHeader.readfrom(iostream)
        if not stream:
            buffer = iostream.read(header.size)
            if len(buffer) < header.size:
                raise Error('chunk data truncated')
            iostream = io.BytesIO(buffer)
        data = ChunkData.streamfrom(iostream, header.size)
        if header.size % 2 != 0:
            padbyte = iostream.read(cls.PAD_SIZE)
        else:
            padbyte = b''
        return cls(header, data, padbyte)

    @classmethod
    def readfrom(cls, iostream):
        """Read a chunk eagerly (data copied into memory)."""
        return cls._readfrom(iostream, stream=False)

    @classmethod
    def streamfrom(cls, iostream):
        """Read a chunk lazily (data stays in *iostream*)."""
        return cls._readfrom(iostream, stream=True)

    def __repr__(self):
        return "riff.Chunk(id='{}', size={})".format(self.id, self.size)

    @property
    def data(self):
        return self._data

    @property
    def id(self):
        return self._header.id

    @property
    def padded(self):
        # True when a pad byte follows the data (odd data size).
        return self.size % 2 != 0

    @property
    def size(self):
        return self._header.size
class RiffChunk:
    """A parsed top-level 'RIFF' chunk: a 4-char format plus subchunks."""
    # The format field is 4 raw bytes at the start of the chunk data.
    FORMAT_STRUCT = struct.Struct('4s')
    ID = 'RIFF'
    def __init__(self, size, format, subchunks):
        self._size = size
        self._format = format
        self._subchunks = subchunks
    @classmethod
    def _readfrom(cls, iostream, stream):
        # Shared reader; *stream* selects lazy (streamed) vs eager
        # (buffered) reading of chunk data.
        readchunk = Chunk.streamfrom if stream else Chunk.readfrom
        chunk = readchunk(iostream)
        if chunk.id != cls.ID:
            raise Error("unexpected chunk id '{}'".format(chunk.id))
        buffer = chunk.data.read(cls.FORMAT_STRUCT.size)
        if len(buffer) < cls.FORMAT_STRUCT.size:
            raise Error('riff chunk format truncated')
        formatbytes, = cls.FORMAT_STRUCT.unpack(buffer)
        try:
            format = formatbytes.decode('ascii')
        except UnicodeDecodeError as error:
            raise Error('riff chunk format not ascii-decodable') from error
        # The rest of the data section is a sequence of subchunks; keep
        # reading until the declared size is exhausted.
        subchunks = []
        while chunk.data.tell() < chunk.data.size:
            subchunk = readchunk(chunk.data)
            subchunks.append(subchunk)
        return cls(chunk.size, format, subchunks)
    @classmethod
    def readfrom(cls, iostream):
        """Read a RIFF chunk eagerly (subchunk data buffered in memory)."""
        return cls._readfrom(iostream, stream=False)
    @classmethod
    def streamfrom(cls, iostream):
        """Read a RIFF chunk lazily (subchunk data stays in the stream)."""
        return cls._readfrom(iostream, stream=True)
    @property
    def format(self):
        # 4-character ascii format, e.g. 'WAVE'.
        return self._format
    @property
    def size(self):
        return self._size
    def subchunks(self):
        """Yield the parsed subchunks in file order."""
        for subchunk in self._subchunks:
            yield subchunk
class WaveFormatChunk:
    """Values parsed from the 'fmt ' subchunk of a wave file."""

    ID = 'fmt '

    def __init__(self, channels, samplerate, samplebits):
        self._channels = channels
        self._samplerate = samplerate
        self._samplebits = samplebits

    @property
    def channels(self):
        return self._channels

    @property
    def samplerate(self):
        return self._samplerate

    @property
    def samplebits(self):
        return self._samplebits

    @property
    def blockalign(self):
        # bytes per sample frame; true division, so this may be a float
        return self.channels * self.samplebits / 8

    @property
    def byterate(self):
        # bytes of audio per second
        return self.samplerate * self.blockalign
class WaveChunk:
    """A RIFF chunk whose format is 'WAVE' (a wave audio file)."""

    # A wave file IS a RIFF chunk, so its chunk id is 'RIFF'. The class
    # previously had no ID attribute, making the `id` property raise
    # AttributeError.
    ID = 'RIFF'
    FORMAT = 'WAVE'

    def __init__(self, size):
        self._size = size

    @classmethod
    def readfrom(cls, iostream):
        """Read a wave chunk from *iostream* and return it.

        Validates that the RIFF format is 'WAVE' and that a 'fmt '
        subchunk is present; raises Error otherwise.
        """
        riffchunk = RiffChunk.readfrom(iostream)
        if riffchunk.format != cls.FORMAT:
            raise Error("'{}' != '{}'".format(riffchunk.format, cls.FORMAT))
        try:
            # presence check only: the format subchunk is not retained
            next(
                subchunk
                for subchunk in riffchunk.subchunks()
                if subchunk.id == WaveFormatChunk.ID
            )
        except StopIteration:
            raise Error('no wave format subchunk found')
        # BUG FIX: the method previously fell off the end and returned
        # None; return the constructed wave chunk instead.
        return cls(riffchunk.size)

    @property
    def id(self):
        return self.ID

    @property
    def size(self):
        return self._size
| |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import errno
import functools
import logging
import os
import six
import sys
import traceback
from taskflow import exceptions
from taskflow.utils import reflection
# Module-level logger, named after this module per logging convention.
LOG = logging.getLogger(__name__)
def wraps(fn):
    """Decorator factory like functools.wraps that also records the
    original callable on ``__wrapped__``.

    Python >= 3.2 does this natively in functools.wraps, making this
    helper unnecessary there.
    """
    def wrapper(f):
        wrapped = functools.wraps(fn)(f)
        wrapped.__wrapped__ = getattr(fn, '__wrapped__', fn)
        return wrapped
    return wrapper
def get_version_string(obj):
    """Get an object's version as a string.

    Returns the string representation of the object's 'version'
    attribute (list/tuple versions are dotted-joined), or None when the
    object has no such attribute or its version is None.
    """
    version = getattr(obj, 'version', None)
    if isinstance(version, (list, tuple)):
        version = '.'.join(str(piece) for piece in version)
    if version is not None and not isinstance(version, six.string_types):
        version = str(version)
    return version
def get_duplicate_keys(iterable, key=None):
    """Return the set of items (or key(item) values) seen more than once."""
    if key is not None:
        # builtin map iterates identically to six.moves.map here
        iterable = map(key, iterable)
    seen = set()
    duplicates = set()
    for candidate in iterable:
        if candidate in seen:
            duplicates.add(candidate)
        seen.add(candidate)
    return duplicates
def is_valid_attribute_name(name, allow_self=False, allow_hidden=False):
    """Validates that a string name is a valid/invalid python attribute name

    A valid name is a non-empty string starting with a letter whose
    remaining characters are letters, digits or underscores; names
    starting with 'self' or '_' are rejected unless the corresponding
    allow_* flag is set.
    """
    if not isinstance(name, six.string_types) or len(name) == 0:
        return False
    # BUG FIX: the name was previously converted with six.b(), which
    # yields bytes on python 3 and made the str comparisons and the
    # per-character isalpha/isdigit checks below fail; operate on the
    # text directly instead.
    if not allow_self and name.lower().startswith('self'):
        return False
    if not allow_hidden and name.startswith("_"):
        return False
    # See: http://docs.python.org/release/2.5.2/ref/grammar.txt (or newer)
    #
    # Python identifiers should start with a letter.
    if not name[0].isalpha():
        return False
    for i in range(1, len(name)):
        # The rest of an attribute name follows: (letter | digit | "_")*
        if not (name[i].isalpha() or name[i].isdigit() or name[i] == "_"):
            return False
    return True
class AttrDict(dict):
    """Helper utility dict sub-class to create a class that can be accessed by
    attribute name from a dictionary that contains a set of keys and values.
    """
    # Attribute names already provided by dict itself are not allowed.
    # NOTE(review): depends on taskflow.utils.reflection at class-creation time.
    NO_ATTRS = tuple(reflection.get_member_names(dict))
    @classmethod
    def _is_valid_attribute_name(cls, name):
        if not is_valid_attribute_name(name):
            return False
        # Make the name just be a simple string in latin-1 encoding in python3
        # NOTE(review): six.b() returns bytes on python 3 while NO_ATTRS holds
        # str values, so this membership test appears unable to match there --
        # confirm the intent before relying on it.
        name = six.b(name)
        if name in cls.NO_ATTRS:
            return False
        return True
    def __init__(self, **kwargs):
        # Validate every initial key so invalid attribute names fail early.
        for (k, v) in kwargs.items():
            if not self._is_valid_attribute_name(k):
                raise AttributeError("Invalid attribute name: '%s'" % (k))
            self[k] = v
    def __getattr__(self, name):
        # Attribute reads fall through to dictionary item lookups.
        if not self._is_valid_attribute_name(name):
            raise AttributeError("Invalid attribute name: '%s'" % (name))
        try:
            return self[name]
        except KeyError:
            raise AttributeError("No attributed named: '%s'" % (name))
    def __setattr__(self, name, value):
        # Attribute writes become dictionary item assignments.
        if not self._is_valid_attribute_name(name):
            raise AttributeError("Invalid attribute name: '%s'" % (name))
        self[name] = value
class ExponentialBackoff(object):
    """An iterable object that will yield back an exponential delay sequence
    provided an exponent and a number of items to yield. This object may be
    iterated over multiple times (yielding the same sequence each time).
    """
    def __init__(self, attempts, exponent=2):
        self.attempts = int(attempts)
        self.exponent = exponent

    def __iter__(self):
        if self.attempts <= 0:
            # BUG FIX: returning (instead of raising StopIteration) keeps
            # this a valid empty generator under PEP 479 (python 3.7+),
            # where an explicit StopIteration becomes a RuntimeError.
            return
        # BUG FIX: xrange does not exist on python 3; range iterates
        # identically on both major versions.
        for i in range(0, self.attempts):
            yield self.exponent ** i

    def __str__(self):
        return "ExponentialBackoff: %s" % ([str(v) for v in self])
def as_bool(val):
    """Convert an arbitrary value into a boolean.

    Recognized false strings: f/false/0/n/no; true strings:
    t/true/1/y/yes (case-insensitive). Anything else falls back to
    bool(val).
    """
    if isinstance(val, bool):
        return val
    if isinstance(val, six.string_types):
        lowered = val.lower()
        if lowered in ('f', 'false', '0', 'n', 'no'):
            return False
        if lowered in ('t', 'true', '1', 'y', 'yes'):
            return True
    return bool(val)
def as_int(obj, quiet=False):
    """Convert an arbitrary value into an integer.

    Tries int(obj) first ("2" -> 2), then int(float(obj)) ("2.5" -> 2).
    On failure, raises TypeError unless *quiet*, in which case the
    original object is returned unchanged.
    """
    for converter in (int, lambda value: int(float(value))):
        try:
            return converter(obj)
        except (ValueError, TypeError):
            pass
    # Eck, not sure what this is then.
    if not quiet:
        raise TypeError("Can not translate %s to an integer." % (obj))
    return obj
# Taken from oslo-incubator file-utils but since that module pulls in a large
# amount of other files it does not seem so useful to include that full
# module just for this function.
def ensure_tree(path):
    """Create a directory (and any ancestor directories required).

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # EEXIST is acceptable only when the path really is a directory;
        # anything else (including a file at that path) re-raises.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
class TransitionNotifier(object):
    """A utility helper class that can be used to subscribe to
    notifications of events occurring as well as allow an entity to post
    said notifications to subscribers.

    Callbacks registered for a state (or for ANY) are invoked as
    ``callback(state, *args, details=details, **kwargs)``.
    """

    RESERVED_KEYS = ('details',)
    ANY = '*'

    def __init__(self):
        self._listeners = collections.defaultdict(list)

    def reset(self):
        """Discard all registered listeners."""
        self._listeners = collections.defaultdict(list)

    def notify(self, state, details):
        """Invoke every listener registered for ``state`` (or for ANY).

        A listener that raises is logged and skipped so that one failing
        callback cannot prevent the others from being notified.
        """
        listeners = list(self._listeners.get(self.ANY, []))
        for entry in self._listeners[state]:
            if entry not in listeners:
                listeners.append(entry)
        if not listeners:
            return
        for (callback, args, kwargs) in listeners:
            if args is None:
                args = []
            if kwargs is None:
                kwargs = {}
            kwargs['details'] = details
            try:
                callback(state, *args, **kwargs)
            except Exception:
                LOG.exception(("Failure calling callback %s to notify about"
                               " state transition %s"), callback, state)

    def register(self, state, callback, args=None, kwargs=None):
        """Register ``callback`` to be run on transitions to ``state``.

        Raises ValueError if the callback is already registered for this
        state and KeyError if ``kwargs`` contains a reserved key.
        """
        # callable() replaces isinstance(callback, collections.Callable),
        # which breaks on Python 3.10+ (moved to collections.abc).
        assert callable(callback)
        # BUG FIX: the original loop reused the names ``args``/``kwargs``
        # for its unpacking targets, clobbering the caller's arguments so
        # a new listener was stored with the *previous* listener's
        # args/kwargs.  Distinct names preserve the parameters.
        for (existing_cb, _cb_args, _cb_kwargs) in self._listeners.get(state, []):
            if existing_cb is callback:
                raise ValueError("Callback %s already registered" % (callback))
        if kwargs:
            for k in self.RESERVED_KEYS:
                if k in kwargs:
                    raise KeyError(("Reserved key '%s' not allowed in "
                                    "kwargs") % k)
            kwargs = copy.copy(kwargs)
        if args:
            args = copy.copy(args)
        self._listeners[state].append((callback, args, kwargs))

    def deregister(self, state, callback):
        """Remove a previously registered callback (no-op when absent)."""
        if state not in self._listeners:
            return
        for i, (cb, _cb_args, _cb_kwargs) in enumerate(self._listeners[state]):
            if cb is callback:
                self._listeners[state].pop(i)
                break
def copy_exc_info(exc_info):
    """Duplicate an exception info tuple as deeply as is possible.

    The exception type is shared (types need no copy), the exception value
    is deep-copied, and the traceback is shared since tracebacks cannot be
    copied.
    """
    if exc_info is None:
        return None
    return (exc_info[0], copy.deepcopy(exc_info[1]), exc_info[2])
def are_equal_exc_info_tuples(ei1, ei2):
    """Return True when two exc_info tuples describe the same failure.

    Exception values cannot be compared with ``==`` directly (a copy made
    by copy_exc_info must still compare equal), so values are compared via
    their type, str() and repr(), and tracebacks via their formatted text.
    """
    if ei1 == ei2:
        return True
    if ei1 is None or ei2 is None:
        # Only one side is None here; both-None already returned True.
        return False
    if ei1[0] is not ei2[0]:
        return False
    values_match = (type(ei1[1]) == type(ei2[1])
                    and str(ei1[1]) == str(ei2[1])
                    and repr(ei1[1]) == repr(ei2[1]))
    if not values_match:
        return False
    if ei1[2] == ei2[2]:
        return True
    return traceback.format_tb(ei1[2]) == traceback.format_tb(ei2[2])
class Failure(object):
    """Object that represents failure.

    Failure objects encapsulate exception information so that
    it can be re-used later to re-raise or inspect.
    """

    def __init__(self, exc_info=None, **kwargs):
        # Two construction modes:
        #  * capture mode (no kwargs): snapshot a live exception tuple,
        #    defaulting to the one currently handled (sys.exc_info()).
        #  * rebuild mode (kwargs): restore from previously extracted
        #    fields (e.g. after serialization); exc_info may be lost (None).
        if not kwargs:
            if exc_info is None:
                exc_info = sys.exc_info()
            self._exc_info = exc_info
            self._exc_type_names = list(
                reflection.get_all_class_names(exc_info[0], up_to=Exception))
            if not self._exc_type_names:
                raise TypeError('Invalid exception type: %r' % exc_info[0])
            self._exception_str = str(self._exc_info[1])
            self._traceback_str = ''.join(
                traceback.format_tb(self._exc_info[2]))
        else:
            self._exc_info = exc_info  # may be None
            self._exception_str = kwargs.pop('exception_str')
            self._exc_type_names = kwargs.pop('exc_type_names', [])
            self._traceback_str = kwargs.pop('traceback_str', None)
            if kwargs:
                raise TypeError(
                    'Failure.__init__ got unexpected keyword argument(s): %s'
                    % ', '.join(six.iterkeys(kwargs)))

    def _matches(self, other):
        # Field-level comparison used when exc_info tuples are unavailable.
        if self is other:
            return True
        return (self._exc_type_names == other._exc_type_names
                and self.exception_str == other.exception_str
                and self.traceback_str == other.traceback_str)

    def matches(self, other):
        """Loose equality: ignores exc_info when either side has lost it."""
        if not isinstance(other, Failure):
            return False
        if self.exc_info is None or other.exc_info is None:
            return self._matches(other)
        else:
            return self == other

    def __eq__(self, other):
        if not isinstance(other, Failure):
            return NotImplemented
        return (self._matches(other) and
                are_equal_exc_info_tuples(self.exc_info, other.exc_info))

    def __ne__(self, other):
        return not (self == other)

    # NOTE(imelnikov): obj.__hash__() should return same values for equal
    # objects, so we should redefine __hash__. Failure equality semantics
    # is a bit complicated, so for now we just mark Failure objects as
    # unhashable. See python docs on object.__hash__ for more info:
    # http://docs.python.org/2/reference/datamodel.html#object.__hash__
    __hash__ = None

    @property
    def exception(self):
        """Exception value, or None if exception value is not present.

        Exception value may be lost during serialization.
        """
        if self._exc_info:
            return self._exc_info[1]
        else:
            return None

    @property
    def exception_str(self):
        """String representation of exception."""
        return self._exception_str

    @property
    def exc_info(self):
        """Exception info tuple or None."""
        return self._exc_info

    @property
    def traceback_str(self):
        """Exception traceback as string."""
        return self._traceback_str

    @staticmethod
    def reraise_if_any(failures):
        """Re-raise exceptions if argument is not empty.

        If argument is empty list, this method returns None. If
        argument is list with single Failure object in it,
        this failure is reraised. Else, WrappedFailure exception
        is raised with failures list as causes.
        """
        failures = list(failures)
        if len(failures) == 1:
            failures[0].reraise()
        elif len(failures) > 1:
            raise exceptions.WrappedFailure(failures)

    def reraise(self):
        """Re-raise captured exception."""
        if self._exc_info:
            six.reraise(*self._exc_info)
        else:
            # The original exception tuple was lost (e.g. serialization):
            # raise a wrapper carrying the stored description instead.
            raise exceptions.WrappedFailure([self])

    def check(self, *exc_classes):
        """Check if any of exc_classes caused the failure.

        Arguments of this method can be exception types or type
        names (strings). If captured exception is instance of
        exception of given type, the corresponding argument is
        returned. Else, None is returned.
        """
        for cls in exc_classes:
            if isinstance(cls, type):
                err = reflection.get_class_name(cls)
            else:
                err = cls
            if err in self._exc_type_names:
                return cls
        return None

    def __str__(self):
        return 'Failure: %s: %s' % (self._exc_type_names[0],
                                    self._exception_str)

    def __iter__(self):
        """Iterate over exception type names."""
        for et in self._exc_type_names:
            yield et

    def copy(self):
        """Return a new Failure duplicating this one (exc_info copied as deeply as possible)."""
        return Failure(exc_info=copy_exc_info(self.exc_info),
                       exception_str=self.exception_str,
                       traceback_str=self.traceback_str,
                       exc_type_names=self._exc_type_names[:])
| |
from django.shortcuts import render
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render,render_to_response
from django.template import RequestContext
from rango.models import Category,Page,UserProfile,User
from rango.forms import CategoryForm,PageForm,UserProfileForm,UserForm,LoginForm
from django.contrib.auth import authenticate, login,logout
from django.contrib.auth.decorators import login_required
from datetime import datetime
from rango.bing_search import run_query
from django.shortcuts import redirect
@login_required
def add_page(request,category_name_url):
    """Render and process the form for adding a Page to an existing category.

    category_name_url uses underscores in place of spaces.  On success the
    category view is rendered; if the category does not exist, the
    add-category form is shown instead.
    """
    context = RequestContext(request)
    category_name = category_name_url.replace('_', ' ')
    if request.method == 'POST':
        print ("POST DETECTED")
        form = PageForm(request.POST)
        if form.is_valid():
            # This time we cannot commit straight away.
            # Not all fields are automatically populated!
            print ("form is valid!")
            page = form.save(commit=False)
            # Retrieve the associated Category object so we can add it.
            # Wrap the code in a try block - check if the category actually exists!
            try:
                cat = Category.objects.get(name=category_name)
                page.category = cat
            except Category.DoesNotExist:
                # If we get here, the category does not exist.
                # Go back and render the add category form as a way of saying the category does not exist.
                return render_to_response('rango/add_category.html', {}, context)
            # Also, create a default value for the number of views.
            page.views = 0
            # With this, we can then save our new model instance.
            page.save()
            # Now that the page is saved, display the category instead.
            return category(request, category_name_url)
        else:
            print ("form is Not valid!")
            # NOTE: Python 2 print statement; form errors go to stdout only,
            # while the invalid form is re-rendered below with its errors.
            print form.errors
    else:
        form = PageForm()
    return render_to_response( 'rango/add_page.html',
            {'category_name_url': category_name_url,
             'category_name': category_name, 'form': form},
            context)
@login_required
def add_category(request):
    """Render and process the form for creating a new Category."""
    context = RequestContext(request)
    #An HTTP Post?
    if request.method =='POST':
        form = CategoryForm(request.POST)
        #Verifying if the form is valid
        if form.is_valid():
            #Saving category to database
            form.save(commit=True)
            #Return user to the homepage
            return index(request)
        else:
            # NOTE: Python 2 print statement; the invalid form (with its
            # errors) is re-rendered by the return below.
            print form.errors
    else:
        #If the request was not POST, display the form to enter data
        form = CategoryForm()
    # Bad form (or form details), no form supplied...
    # Render the form with error messages (if any).
    return render_to_response('rango/add_category.html', {'form': form}, context)
def index(request):
    """Render the Rango homepage with top categories and session visit tracking.

    Shows five categories ordered by likes and five by views, and keeps a
    per-session visit counter that increments at most once every 5 seconds.
    """
    request.session.set_test_cookie()
    # The context carries request/client details into the template engine.
    context = RequestContext(request)
    # NOTE(review): order_by('likes') is ascending -- '-likes' would surface
    # the MOST liked categories (as '-views' does below); confirm intent.
    cat_list = Category.objects.order_by('likes')[:5]
    most_viewed = Category.objects.order_by('-views')[:5]
    # Build URL-safe names (spaces -> underscores) for template links.
    for category in cat_list:
        category.url = category.name.replace(' ', '_')
    for category in most_viewed:
        category.url = category.name.replace(' ', '_')
    context_dict = {'categories': cat_list}
    context_dict['most_viewed'] = most_viewed
    context_dict['cat_list'] = cat_list
    # Session-based visit counting.  (The original rendered the template a
    # first time here and discarded the response; that dead render has been
    # removed -- the page is rendered once, after the session is updated.)
    if request.session.get('last_visit'):
        last_visit_time = request.session.get('last_visit')
        visits = request.session.get('visits', 0)
        # Count a new visit only if the previous one is over 5 seconds old.
        if (datetime.now() - datetime.strptime(last_visit_time[:-7], "%Y-%m-%d %H:%M:%S")).seconds > 5:
            request.session['visits'] = visits + 1
            request.session['last_visit'] = str(datetime.now())
    else:
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = 1
    # Return response back to the user, updating any cookies that need changed.
    return render_to_response('rango/index.html', context_dict, context)
def category(request, category_name_url):
    """Show a single category: its top pages plus optional Bing search results.

    category_name_url encodes spaces as underscores; when the category is
    not found, the template itself reports the absence.
    """
    context = RequestContext(request)
    # Underscores in the URL stand in for spaces in the stored name.
    category_name = category_name_url.replace('_', ' ')
    sidebar_categories = Category.objects.order_by('likes')[:5]
    for entry in sidebar_categories:
        entry.url = entry.name.replace(' ', '_')
    context_dict = {'category_name': category_name, 'categories': sidebar_categories}
    # A POST on this page carries a Bing search query.
    if request.method == 'POST':
        query = request.POST['query'].strip()
        if query:
            context_dict['result_list'] = run_query(query)
    try:
        matched = Category.objects.get(name=category_name)
        # Expose the model so templates can reach its id and likes.
        context_dict['category'] = matched
        # Five most viewed pages of this category.
        pages = Page.objects.filter(category=matched).order_by('views')[:5].reverse()
        context_dict['pages'] = pages
        context_dict['category'] = matched
        context_dict['category_name_url'] = category_name_url
    except Category.DoesNotExist:
        # Nothing extra to add; the template shows a "no category" message.
        pass
    return render_to_response('rango/category.html', context_dict, context)
def about(request):
    """Render the about page with a static message and the session visit count."""
    context = RequestContext(request)
    var = "This is the About Page."
    # Falsy/missing session value collapses to zero, as before.
    count = request.session.get('visits') or 0
    context_dict = {'variable': var, 'count': count}
    return render_to_response('rango/about.html', context_dict, context)
def register(request):
    """Register a new user with linked User and UserProfile forms."""
    context=RequestContext(request)
    if request.session.test_cookie_worked():
        # NOTE: Python 2 print statement.
        print ">>>> TEST COOKIE WORKED!"
        request.session.delete_test_cookie()
    # NOTE(review): redundant second construction of the context; the one
    # above is overwritten here.
    context=RequestContext(request)
    # A boolean value for telling the template whether the registration was successful.
    # Set to False initially. Code changes value to True when registration succeeds.
    registered= False
    #If it's a POST we process the form data
    if request.method == 'POST':
        #Attempting to get all the data from both of the forms from the user's input
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileForm(data=request.POST)
        #If forms are valid...
        if user_form.is_valid() and profile_form.is_valid():
            #save them to database
            user = user_form.save()
            #hashing the password with set_password method
            user.set_password(user.password)
            user.save()
            # Now sort out the UserProfile instance.
            # Since we need to set the user attribute ourselves, we set commit=False.
            # This delays saving the model until we're ready to avoid integrity problems.
            profile = profile_form.save(commit=False)
            profile.user = user
            #Checking if the user provided a profile picture
            if 'picture' in request.FILES:
                profile.picture = request.FILES['picture']
            profile.save()
            #Boolean to True to tell the template registration was successful
            registered = True
        #If form is not valid
        else:
            #Print errors (Python 2 print statement, stdout only)
            print user_form.errors,profile_form.errors
    #Not an HTTP POST..?
    else:
        #Display the forms to get the data
        user_form = UserForm()
        profile_form = UserProfileForm()
    # Render the templates depending on the context
    return render_to_response('rango/register.html',{'user_form':user_form,'profile_form':profile_form,'registered':registered},context)
def user_login(request):
context=RequestContext(request)
if request.method =='POST':
form = LoginForm(request.POST)
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
if form.is_valid():
user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
if user:
#The accounts is active? Could have been disabled
if user.is_active:
login(request,user)
return HttpResponseRedirect('/rango/')
else: # An inactive account was used - no logging in!
return HttpResponse("Your Rango account is disabled.")
else:
form = LoginForm
return HttpResponse("Invalid login details supplied.")
else:
print form.errors
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be a HTTP GET.
else:
# No context variables to pass to the template system, hence the
# blank dictionary object...
form = LoginForm()
return render_to_response('rango/login.html', {'form':form}, context)
@login_required
def user_logout(request):
    """Log out the current user and redirect them to the homepage."""
    # login_required guarantees an authenticated user here, so logging out
    # cannot fail.
    logout(request)
    return HttpResponseRedirect('/rango/')
@login_required
def restricted(request):
    """Render a page that only logged-in users may see."""
    return render_to_response('rango/restricted.html', {}, RequestContext(request))
def search(request):
    """Render the search page, running a Bing query when one is POSTed."""
    context = RequestContext(request)
    query = request.POST['query'].strip() if request.method == 'POST' else ''
    # Non-POST requests and blank queries yield an empty results list.
    result_list = run_query(query) if query else []
    return render_to_response('rango/search.html', {'result_list': result_list}, context)
@login_required
def auto_add_page(request):
    """AJAX helper: attach a page to a category and return the refreshed page list.

    Expects GET parameters category_id, url and title.
    """
    context = RequestContext(request)
    # BUG FIX: context_dict was never defined, so every request raised
    # NameError at the final render call.
    context_dict = {}
    cat_id = None
    url = None
    title = None
    if request.method == 'GET':
        cat_id = request.GET['category_id']
        url = request.GET['url']
        title = request.GET['title']
        if cat_id:
            category = Category.objects.get(id=int(cat_id))
            # get_or_create keeps repeat clicks idempotent; the returned
            # (object, created) pair is not needed here.
            Page.objects.get_or_create(category=category, title=title, url=url)
            pages = Page.objects.filter(category=category).order_by('-views')
            # Adds our results list to the template context under name pages.
            context_dict['pages'] = pages
    return render_to_response('rango/page_list.html', context_dict, context)
@login_required
def profile(request):
    """Show the logged-in user's profile details."""
    context = RequestContext(request)
    # login_required guarantees request.user is authenticated.
    current_user = request.user
    current_profile = UserProfile.objects.get(user=current_user)
    return render_to_response('rango/profile.html', {'user': current_user, 'user_profile': current_profile}, context)
def track_url(request):
    """Count a click on a page, then redirect to its external URL.

    Expects GET parameter page_id; falls back to the homepage when the id
    is missing, malformed or unknown.
    """
    page_id = None
    url = '/rango/'
    if request.method == 'GET':
        if 'page_id' in request.GET:
            page_id = request.GET['page_id']
            try:
                page = Page.objects.get(id=page_id)
                page.views = page.views + 1
                page.save()
                url = page.url
            except (Page.DoesNotExist, ValueError):
                # Unknown or non-numeric id: quietly fall back to the
                # homepage.  (The original bare ``except:`` also hid
                # genuine programming errors.)
                pass
    return redirect(url)
@login_required
def like_category(request):
    """AJAX endpoint: increment a category's like count and return the new total."""
    context = RequestContext(request)
    cat_id = request.GET['category_id'] if request.method == 'GET' else None
    likes = 0
    if cat_id:
        category = Category.objects.get(id=int(cat_id))
        if category:
            likes = category.likes + 1
            category.likes = likes
            category.save()
    return HttpResponse(likes)
def suggest_category(request):
    """AJAX endpoint: render up to eight categories matching a typed prefix."""
    context = RequestContext(request)
    starts_with = ''
    if request.method == 'GET':
        starts_with = request.GET['suggestion']
    # An empty prefix lists (up to eight of) all categories.
    cat_list = get_category_list(8, starts_with)
    return render_to_response('rango/category_list.html', {'cat_list': cat_list }, context)
#Helper Functions
def encode_url(category_name):
    """Turn a category name into its URL form (spaces -> underscores).

    BUG FIX: the original implementation was swapped with decode_url (it
    replaced underscores with spaces), so `cat.url = encode_url(cat.name)`
    left names like "Python Books" unencoded -- the inline code in index()
    shows the intended ' ' -> '_' direction.
    """
    return category_name.replace(' ', '_')
def decode_url(category_name_url):
    """Turn a URL-encoded category name back into display form (underscores -> spaces).

    BUG FIX: the original implementation was swapped with encode_url (it
    replaced spaces with underscores); decoding must reverse the ' ' -> '_'
    encoding used for category URLs elsewhere in this module.
    """
    return category_name_url.replace('_', ' ')
def get_category_list(max_results=0, starts_with=''):
    """Return categories, optionally prefix-filtered, each with a .url attribute.

    Args:
        max_results: cap on the number of categories returned (0 = no cap).
        starts_with: case-insensitive name prefix filter; empty matches all.
    """
    if starts_with:
        cat_list = Category.objects.filter(name__istartswith=starts_with)
    else:
        cat_list = Category.objects.all()
    if max_results > 0:
        # Slice directly: len() forced the whole queryset to be evaluated
        # first, and slicing past the end is already safe.
        cat_list = cat_list[:max_results]
    for cat in cat_list:
        cat.url = encode_url(cat.name)
    return cat_list
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.deprecation import deprecated
# Public names exported by this (deprecated) contrib losses module.
__all__ = ["absolute_difference",
           "add_loss",
           "cosine_distance",
           "compute_weighted_loss",
           "get_losses",
           "get_regularization_losses",
           "get_total_loss",
           "hinge_loss",
           "log_loss",
           "mean_pairwise_squared_error",
           "mean_squared_error",
           "sigmoid_cross_entropy",
           "softmax_cross_entropy",
           "sparse_softmax_cross_entropy"]
# TODO(b/32171727): Remove when deprecated `targets` is removed.
def _labels(labels, targets):
if labels is None:
labels = targets
elif targets is not None:
raise ValueError("Can not specify both `labels` and `targets`.")
if labels is None:
raise ValueError("Must provide 1 of `labels` and `targets`.")
return labels
# TODO(b/32171727): Remove when deprecated `weight` is removed.
_WEIGHT_SENTINEL = object()
# TODO(b/32171727): Remove when deprecated `weight` is removed. Also, restore
# weights=1.0 as default in all calling fns.
def _weights(weights, weight):
if weights is _WEIGHT_SENTINEL:
weights = weight
elif weight is not _WEIGHT_SENTINEL:
raise ValueError("Can not specify both `weights` and `weight`.")
if weights is None:
raise ValueError("`weights` cannot be None.")
if weights is _WEIGHT_SENTINEL:
weights = 1.0
return weights
def _scale_losses(losses, weights):
  """Computes the scaled loss.

  Args:
    losses: A `Tensor` of size [batch_size, d1, ... dN].
    weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
      The `losses` are reduced (tf.reduce_sum) until its dimension matches
      that of `weights` at which point the reduced `losses` are element-wise
      multiplied by `weights` and a final reduce_sum is computed on the result.
      Conceptually, this operation is equivalent to broadcasting (tiling)
      `weights` to be the same size as `losses`, performing an element-wise
      multiplication, and summing the result.

  Returns:
    A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
    `losses`.
  """
  # First, compute the sum of the losses over all elements:
  # reduce trailing dimensions of `losses` so its rank matches `weights`.
  start_index = max(0, weights.get_shape().ndims)
  reduction_indices = list(range(start_index, losses.get_shape().ndims))
  reduced_losses = math_ops.reduce_sum(losses,
                                       reduction_indices=reduction_indices)
  # Element-wise weighting, then collapse everything to a scalar.
  reduced_losses = math_ops.mul(reduced_losses, weights)
  return math_ops.reduce_sum(reduced_losses)
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  # Where denominator > 0, divide normally; elsewhere emit 0.  The inner
  # where() substitutes 1 for zero denominators so the (discarded) division
  # never produces NaN/Inf that could poison gradients.
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
def _safe_mean(losses, num_present):
  """Computes sum(losses) / num_present, returning zero when num_present is 0.

  Args:
    losses: A tensor whose elements contain individual loss measurements.
    num_present: The number of measurable losses in the tensor.

  Returns:
    A scalar representing the mean of the losses, or zero when
    `num_present` is zero.
  """
  return _safe_div(math_ops.reduce_sum(losses), num_present)
@deprecated("2016-12-30", "Use tf.losses.compute_weighted_loss instead.")
@deprecated_args(
    "2016-11-25", "`weight` is being deprecated, use `weights`.", "weight")
def compute_weighted_loss(
    losses, weights=_WEIGHT_SENTINEL, scope=None, weight=_WEIGHT_SENTINEL):
  """Computes the weighted loss.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    scope: the scope for the operations performed in computing the loss.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If `weights` is `None` or the shape is not compatible with
      `losses`, or if the number of dimensions (rank) of either `losses` or
      `weights` is missing.
  """
  weights = _weights(weights, weight)
  with ops.name_scope(scope, "weighted_loss", [losses, weights]):
    losses = ops.convert_to_tensor(losses)
    input_dtype = losses.dtype
    # Compute in float32; the result is cast back to input_dtype below.
    losses = math_ops.to_float(losses)
    weights = math_ops.to_float(ops.convert_to_tensor(weights))
    if losses.get_shape().ndims is None:
      raise ValueError("losses.get_shape().ndims cannot be None")
    weights_shape = weights.get_shape()
    if weights_shape.ndims is None:
      raise ValueError("weight.get_shape().ndims cannot be None")
    # Drop a trailing size-1 dimension so weights line up with losses.
    if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1):
      weights = array_ops.squeeze(weights, [-1])
    total_loss = _scale_losses(losses, weights)
    num_present = _num_present(losses, weights)
    # Mean over present (non-zero-weight) elements; zero when none present.
    mean_loss = _safe_mean(total_loss, num_present)
    # convert the result back to the input type
    mean_loss = math_ops.cast(mean_loss, input_dtype)
    add_loss(mean_loss)
    return mean_loss
def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.

  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  [4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in
  effect, tiled to match the size of `losses`. Following this effective tile,
  the total number of present elements is the number of non-zero weights.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
    `per_batch` is True, the value is returned as a tensor of size
    [batch_size]. Otherwise, a single scalar tensor is returned.
  """
  # If weights is a scalar, it's easy to compute:
  if weights.get_shape().ndims == 0:
    # Every element counts unless the scalar weight is exactly zero.
    batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
                                                   [0], [1]), [])
    num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
                                 math_ops.to_float(batch_size))
    num_per_batch = array_ops.where(math_ops.equal(weights, 0),
                                    0.0, num_per_batch)
    num_per_batch = math_ops.mul(array_ops.ones(
        array_ops.reshape(batch_size, [1])), num_per_batch)
    return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)

  # First, count the number of nonzero weights:
  if weights.get_shape().ndims >= 1:
    reduction_indices = list(range(1, weights.get_shape().ndims))
    num_nonzero_per_batch = math_ops.reduce_sum(
        math_ops.to_float(math_ops.not_equal(weights, 0)),
        reduction_indices=reduction_indices)

  # Next, determine the number of elements that weight would broadcast to:
  broadcast_dims = array_ops.slice(array_ops.shape(losses),
                                   [weights.get_shape().ndims], [-1])
  num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))

  num_per_batch = math_ops.mul(num_nonzero_per_batch, num_to_broadcast)
  return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
@deprecated("2016-12-30", "Use tf.losses.add_loss instead.")
@add_arg_scope
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
  """Adds a externally defined loss to the collection of losses.

  Args:
    loss: A loss `Tensor`.
    loss_collection: Optional collection to add the loss to.  A falsy value
      (e.g. None) makes this a no-op.
  """
  if loss_collection:
    ops.add_to_collection(loss_collection, loss)
@deprecated("2016-12-30", "Use tf.losses.get_losses instead.")
def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
  """Gets the list of losses from the loss_collection.

  Args:
    scope: an optional scope for filtering the losses to return.
    loss_collection: Optional losses collection.

  Returns:
    a list of loss tensors.
  """
  return ops.get_collection(loss_collection, scope)
@deprecated("2016-12-30", "Use tf.losses.get_regularization_losses instead.")
def get_regularization_losses(scope=None):
  """Gets the regularization losses.

  Args:
    scope: an optional scope for filtering the losses to return.

  Returns:
    A list of loss variables.
  """
  return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
@deprecated("2016-12-30", "Use tf.losses.get_total_loss instead.")
def get_total_loss(add_regularization_losses=True, name="total_loss"):
  """Returns a tensor whose value represents the total loss.

  Notice that the function adds the given losses to the regularization losses.

  Args:
    add_regularization_losses: A boolean indicating whether or not to use the
      regularization losses in the sum.
    name: The name of the returned tensor.

  Returns:
    A `Tensor` whose value represents the total loss.

  Raises:
    ValueError: if `losses` is not iterable.
  """
  losses = get_losses()
  if add_regularization_losses:
    # Concatenates the two collections; add_n below sums every tensor.
    losses += get_regularization_losses()
  return math_ops.add_n(losses, name=name)
@deprecated("2016-12-30", "Use tf.losses.absolute_difference instead.")
@deprecated_args(
    "2016-11-25",
    "`targets` is being deprecated, use `labels`."
    " `weight` is being deprecated, use `weights`.",
    "targets", "weight")
def absolute_difference(
    predictions, labels=None, weights=_WEIGHT_SENTINEL, scope=None,
    targets=None, weight=_WEIGHT_SENTINEL):
  """Adds an Absolute Difference loss to the training procedure.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weight` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weight` vector. If the shape
  of `weight` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weight`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    targets: Deprecated alias for `labels`.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weight` is invalid.
  """
  # Resolve the deprecated aliases before any tensor work.
  labels = _labels(labels, targets)
  weights = _weights(weights, weight)
  with ops.name_scope(scope, "absolute_difference",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    # Element-wise |predictions - labels|, then weighted reduction.
    losses = math_ops.abs(math_ops.sub(predictions, labels))
    return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.sigmoid_cross_entropy instead.")
@deprecated_args(
    "2016-11-25", "`weight` is being deprecated, use `weights`", "weight")
def sigmoid_cross_entropy(
    logits, multi_class_labels, weights=_WEIGHT_SENTINEL, label_smoothing=0,
    scope=None, weight=_WEIGHT_SENTINEL):
  """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.

  `weight` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weight` is a
  tensor of size [`batch_size`], then the loss weights apply to each
  corresponding sample.

  If `label_smoothing` is nonzero, smooth the labels towards 1/2:
      new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
                              + 0.5 * label_smoothing

  Args:
    logits: [batch_size, num_classes] logits outputs of the network .
    multi_class_labels: [batch_size, num_classes] target labels in (0, 1).
    weights: Coefficients for the loss. The tensor must be a scalar, a tensor
      of shape [batch_size] or shape [batch_size, num_classes].
    label_smoothing: If greater than 0 then smooth the labels.
    scope: The scope for the operations performed in computing the loss.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `logits` doesn't match that of
      `multi_class_labels` or if the shape of `weight` is invalid, or if
      `weight` is None.
  """
  weights = _weights(weights, weight)
  with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
                      [logits, multi_class_labels, weights]) as scope:
    logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())

    # Labels must match the logits dtype for the fused op below.
    multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)

    if label_smoothing > 0:
      # Shrink labels towards 0.5 to regularize over-confident targets.
      multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
                            0.5 * label_smoothing)

    losses = nn.sigmoid_cross_entropy_with_logits(logits, multi_class_labels,
                                                  name="xentropy")
    return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.softmax_cross_entropy instead.")
@deprecated_args(
    "2016-11-25", "`weight` is being deprecated, use `weights`", "weight")
def softmax_cross_entropy(
    logits, onehot_labels, weights=_WEIGHT_SENTINEL, label_smoothing=0,
    scope=None, weight=_WEIGHT_SENTINEL):
  """Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.

  `weight` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weight` is a
  tensor of size [`batch_size`], then the loss weights apply to each
  corresponding sample.

  If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
      new_onehot_labels = onehot_labels * (1 - label_smoothing)
                          + label_smoothing / num_classes

  Args:
    logits: [batch_size, num_classes] logits outputs of the network .
    onehot_labels: [batch_size, num_classes] target one_hot_encoded labels.
    weights: Coefficients for the loss. The tensor must be a scalar or a
      tensor of shape [batch_size].
    label_smoothing: If greater than 0 then smooth the labels.
    scope: the scope for the operations performed in computing the loss.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
      or if the shape of `weight` is invalid or if `weight` is None.
  """
  weights = _weights(weights, weight)
  with ops.name_scope(scope, "softmax_cross_entropy_loss",
                      [logits, onehot_labels, weights]) as scope:
    logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())

    # Labels must match the logits dtype for the fused op below.
    onehot_labels = math_ops.cast(onehot_labels, logits.dtype)

    if label_smoothing > 0:
      # Redistribute label_smoothing mass uniformly across all classes.
      num_classes = math_ops.cast(
          array_ops.shape(onehot_labels)[1], logits.dtype)
      smooth_positives = 1.0 - label_smoothing
      smooth_negatives = label_smoothing / num_classes
      onehot_labels = onehot_labels * smooth_positives + smooth_negatives

    losses = nn.softmax_cross_entropy_with_logits(logits, onehot_labels,
                                                  name="xentropy")
    return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.sparse_softmax_cross_entropy instead.")
@deprecated_args(
    "2016-11-25", "`weight` is being deprecated, use `weights`", "weight")
def sparse_softmax_cross_entropy(
    logits, labels, weights=_WEIGHT_SENTINEL, scope=None,
    weight=_WEIGHT_SENTINEL):
  """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.

  `weights` scales the loss: a scalar rescales the total loss, while a
  [`batch_size`] tensor rescales each sample's loss individually.

  Args:
    logits: [batch_size, num_classes] logits outputs of the network.
    labels: [batch_size, 1] or [batch_size] target labels of dtype `int32`
      or `int64` in the range `[0, num_classes)`.
    weights: Coefficients for the loss. The tensor must be a scalar or a
      tensor of shape [batch_size] or [batch_size, 1].
    scope: the scope for the operations performed in computing the loss.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shapes of logits, labels, and weight are
      incompatible, or if `weight` is None.
  """
  # Fold the deprecated `weight` alias into `weights`.
  weights = _weights(weights, weight)
  with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                      [logits, labels, weights]) as scope:
    # Flatten possible [batch_size, 1] labels down to [batch_size], and
    # drop singleton dimensions from the weights to match.
    flat_labels = array_ops.reshape(labels,
                                    shape=[array_ops.shape(labels)[0]])
    squeezed_weights = array_ops.squeeze(weights)
    losses = nn.sparse_softmax_cross_entropy_with_logits(logits, flat_labels,
                                                         name="xentropy")
    return compute_weighted_loss(losses, squeezed_weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.log_loss instead.")
@deprecated_args(
    "2016-11-25",
    "`targets` is being deprecated, use `labels`."
    " `weight` is being deprecated, use `weights`.",
    "targets", "weight")
def log_loss(
    predictions, labels=None, weights=_WEIGHT_SENTINEL, epsilon=1e-7,
    scope=None, targets=None, weight=_WEIGHT_SENTINEL):
  """Adds a Log Loss term to the training procedure.

  `weights` scales the loss: a scalar rescales the total loss, a
  [batch_size] tensor rescales each sample's loss, and a tensor shaped like
  `predictions` rescales each measurable element's loss.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.
    targets: Deprecated alias for `labels`.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weight` is invalid.
  """
  # Fold the deprecated aliases into their replacements.
  labels = _labels(labels, targets)
  weights = _weights(weights, weight)
  with ops.name_scope(scope, "log_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    # -y*log(p) - (1-y)*log(1-p), with epsilon guarding against log(0).
    positive_term = math_ops.mul(labels,
                                 math_ops.log(predictions + epsilon))
    negative_term = math_ops.mul((1 - labels),
                                 math_ops.log(1 - predictions + epsilon))
    losses = -positive_term - negative_term
    return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.hinge_loss instead.")
@deprecated_args(
    "2016-11-25", "`target` is being deprecated, use `labels`.", "target")
def hinge_loss(logits, labels=None, scope=None, target=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the
      shape of logits. The values of the tensor are expected to be 0.0 or
      1.0.
    scope: The scope for the operations performed in computing the loss.
    target: Deprecated alias for `labels`.

  Returns:
    A `Tensor` of same shape as logits and target representing the loss
    values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  # Fold the deprecated `target` alias into `labels`.
  labels = _labels(labels, target)
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # Map {0, 1} labels onto signed {-1, +1} floats before applying the
    # hinge: max(0, 1 - y * logits).
    float_labels = math_ops.to_float(labels)
    ones = array_ops.ones_like(float_labels)
    signed_labels = math_ops.sub(2 * float_labels, ones)
    return nn_ops.relu(
        math_ops.sub(ones, math_ops.mul(signed_labels, logits)))
@deprecated("2016-12-30", "Use tf.losses.mean_squared_error instead.")
@deprecated_args(
    "2016-11-25",
    "`targets` is being deprecated, use `labels`."
    " `weight` is being deprecated, use `weights`.",
    "targets", "weight")
def mean_squared_error(
    predictions, labels=None, weights=_WEIGHT_SENTINEL, scope=None,
    targets=None, weight=_WEIGHT_SENTINEL):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` scales the loss: a scalar rescales the total loss, a
  [batch_size] tensor rescales each sample's loss, and a tensor shaped like
  `predictions` rescales each measurable element's loss.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    targets: Deprecated alias for `labels`.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weight` is invalid.
  """
  # Fold the deprecated aliases into their replacements.
  labels = _labels(labels, targets)
  weights = _weights(weights, weight)
  with ops.name_scope(scope, "mean_squared_error",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    errors = math_ops.sub(predictions, labels)
    losses = math_ops.square(errors)
    return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30", "Use tf.losses.mean_pairwise_squared_error instead.")
@deprecated_args(
    "2016-11-25",
    "`targets` is being deprecated, use `labels`."
    " `weight` is being deprecated, use `weights`.",
    "targets", "weight")
def mean_pairwise_squared_error(
    predictions, labels=None, weights=_WEIGHT_SENTINEL, scope=None,
    targets=None, weight=_WEIGHT_SENTINEL):
  """Adds a pairwise-errors-squared loss to the training procedure.

  Unlike `mean_squared_error`, which is a measure of the differences between
  corresponding elements of `predictions` and `labels`,
  `mean_pairwise_squared_error` is a measure of the differences between pairs
  of corresponding elements of `predictions` and `labels`.

  For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
  three pairs of differences are summed to compute the loss:
    loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3

  Note that since the inputs are of size [batch_size, d0, ... dN], the
  corresponding pairs are computed within each batch sample but not across
  samples within a batch. For example, if `predictions` represents a batch of
  16 grayscale images of dimension [batch_size, 100, 200], then the set of
  pairs is drawn from each image, but not across images.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weight` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weight` vector.

  Args:
    predictions: The predicted outputs, a tensor of size
      [batch_size, d0, .. dN] where N+1 is the total number of dimensions in
      `predictions`.
    labels: The ground truth output tensor, whose shape must match the shape
      of the `predictions` tensor.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    targets: Deprecated alias for `labels`.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weight` is invalid.
  """
  # Resolve the deprecated `targets`/`weight` aliases first.
  labels = _labels(labels, targets)
  weights = _weights(weights, weight)
  with ops.name_scope(scope, "mean_pairwise_squared_error",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    weights = math_ops.to_float(ops.convert_to_tensor(weights))
    diffs = math_ops.sub(predictions, labels)
    # Need to verify here since the function doesn't use compute_weighted_loss
    if diffs.get_shape().ndims is None:
      raise ValueError("diffs.get_shape().ndims cannot be None")
    if weights.get_shape().ndims is None:
      raise ValueError("weights.get_shape().ndims cannot be None")
    # Reduce over every axis except the batch axis (axis 0), so pairs are
    # only formed within a sample.
    reduction_indices = list(range(1, diffs.get_shape().ndims))
    sum_squares_diff_per_batch = math_ops.reduce_sum(
        math_ops.square(diffs),
        reduction_indices=reduction_indices)
    num_present_per_batch = _num_present(diffs, weights, per_batch=True)
    # term1 - term2 is the expanded form of the pairwise squared difference
    # sum: sum_{i,j} ((d_i - d_j))^2 expressed via per-sample moments.
    term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
                            num_present_per_batch)
    sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices)
    term2 = 2.0 * _safe_div(math_ops.square(sum_diff),
                            math_ops.square(num_present_per_batch))
    loss = _scale_losses(term1 - term2, weights)
    # When nothing is present (all weights zero), report a zero loss rather
    # than a division artifact.
    mean_loss = array_ops.where(math_ops.reduce_sum(num_present_per_batch) > 0,
                                loss,
                                array_ops.zeros_like(loss),
                                name="value")
    # Side effect: registers the loss in the graph's loss collection.
    add_loss(mean_loss)
    return mean_loss
@deprecated("2016-12-30", "Use tf.losses.cosine_distance instead.")
@deprecated_args(
    "2016-11-25",
    "`targets` is being deprecated, use `labels`."
    " `weight` is being deprecated, use `weights`.",
    "targets", "weight")
def cosine_distance(
    predictions, labels=None, dim=None, weights=_WEIGHT_SENTINEL, scope=None,
    targets=None, weight=_WEIGHT_SENTINEL):
  """Adds a cosine-distance loss to the training procedure.

  Note that the function assumes that `predictions` and `labels` are
  already unit-normalized.

  Args:
    predictions: An arbitrary matrix.
    labels: A `Tensor` whose shape matches 'predictions'
    dim: The dimension along which the cosine distance is computed.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    targets: Deprecated alias for `labels`.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If `predictions` shape doesn't match `labels` shape, or
      `weights` is `None`.
  """
  # Fold the deprecated aliases into their replacements.
  labels = _labels(labels, targets)
  weights = _weights(weights, weight)
  if dim is None:
    raise ValueError("`dim` cannot be None.")
  with ops.name_scope(scope, "cosine_distance_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    # For unit vectors, cosine distance is 1 - <p, l> along `dim`.
    products = math_ops.mul(predictions, labels)
    losses = 1 - math_ops.reduce_sum(products, reduction_indices=[dim])
    return compute_weighted_loss(losses, weights, scope=scope)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_selfip
short_description: Manage Self-IPs on a BIG-IP system
description:
- Manage Self-IPs on a BIG-IP system.
version_added: "2.2"
options:
address:
description:
- The IP addresses for the new self IP. This value is ignored upon update
as addresses themselves cannot be changed after they are created.
allow_service:
description:
- Configure port lockdown for the Self IP. By default, the Self IP has a
"default deny" policy. This can be changed to allow TCP and UDP ports
as well as specific protocols. This list should contain C(protocol):C(port)
values.
name:
description:
- The self IP to create.
required: True
default: Value of C(address)
netmask:
description:
- The netmask for the self IP. When creating a new Self IP, this value
is required.
state:
description:
- The state of the Self IP on the system. When C(present), guarantees
that the Self-IP exists with the provided attributes. When C(absent),
removes the Self-IP from the system.
default: present
choices:
- absent
- present
traffic_group:
description:
- The traffic group for the Self IP addresses in an active-active,
redundant load balancer configuration. When creating a new Self IP, if
this value is not specified, the default of C(/Common/traffic-group-local-only)
will be used.
vlan:
description:
- The VLAN that the new self IPs will be on. When creating a new Self
IP, this value is required.
route_domain:
description:
- The route domain id of the system. When creating a new Self IP, if
this value is not specified, a default value of C(0) will be used.
version_added: 2.3
partition:
description:
- Device partition to manage resources on. You can set different partitions
for Self IPs, but the address used may not match any other address used
by a Self IP. In that sense, Self IPs are not isolated by partitions as
other resources on a BIG-IP are.
default: Common
version_added: 2.5
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the netaddr Python package on the host.
extends_documentation_fragment: f5
requirements:
- netaddr
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create Self IP
bigip_selfip:
address: 10.10.10.10
name: self1
netmask: 255.255.255.0
password: secret
server: lb.mydomain.com
user: admin
validate_certs: no
vlan: vlan1
delegate_to: localhost
- name: Create Self IP with a Route Domain
bigip_selfip:
server: lb.mydomain.com
user: admin
password: secret
validate_certs: no
name: self1
address: 10.10.10.10
netmask: 255.255.255.0
vlan: vlan1
route_domain: 10
allow_service: default
delegate_to: localhost
- name: Delete Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
delegate_to: localhost
- name: Allow management web UI to be accessed on this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- tcp:443
delegate_to: localhost
- name: Allow HTTPS and SSH access to this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- tcp:443
- tcp:22
delegate_to: localhost
- name: Allow all services access to this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- all
delegate_to: localhost
- name: Allow only GRE and IGMP protocols access to this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- gre:0
- igmp:0
delegate_to: localhost
- name: Allow all TCP, but no other protocols access to this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- tcp:0
delegate_to: localhost
'''
RETURN = r'''
allow_service:
description: Services that are allowed via this Self IP
returned: changed
type: list
sample: ['igmp:0','tcp:22','udp:53']
address:
description: The address for the Self IP
returned: created
type: string
sample: 192.0.2.10
name:
description: The name of the Self IP
returned: created, changed and deleted
type: string
sample: self1
netmask:
description: The netmask of the Self IP
returned: changed and created
type: string
sample: 255.255.255.0
traffic_group:
description: The traffic group that the Self IP is a member of
returned: changed and created
type: string
sample: traffic-group-local-only
vlan:
description: The VLAN set on the Self IP
returned: changed and created
type: string
sample: vlan1
'''
import os
import re
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
from ansible.module_utils.six import iteritems
from collections import defaultdict
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
try:
from netaddr import IPNetwork, AddrFormatError, IPAddress
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
class Parameters(AnsibleF5Parameters):
    """Maps module arguments to and from the BIG-IP REST representation.

    Raw values live in the ``_values`` dict; the properties below normalize
    them in both directions (IP validation, netmask-to-prefix conversion,
    port-lockdown service lists, partition-qualified names).
    """

    # REST attribute name -> module option name.
    api_map = {
        'trafficGroup': 'traffic_group',
        'allowService': 'allow_service'
    }

    # Options diffed against device state to detect changes.
    updatables = [
        'traffic_group', 'allow_service', 'vlan', 'route_domain', 'netmask'
    ]

    # Options echoed back to the user in the module result.
    returnables = [
        'traffic_group', 'allow_service', 'vlan', 'route_domain', 'netmask'
    ]

    # Attributes sent to the REST API (REST-side naming).
    api_attributes = [
        'trafficGroup', 'allowService', 'vlan', 'address'
    ]

    def __init__(self, params=None):
        # defaultdict so unset values read as None instead of raising.
        self._values = defaultdict(lambda: None)
        self._values['__warnings'] = []
        if params:
            self.update(params=params)

    def update(self, params=None):
        """Merge ``params`` in, routing values through setters when defined."""
        if params:
            for k, v in iteritems(params):
                if self.api_map is not None and k in self.api_map:
                    map_key = self.api_map[k]
                else:
                    map_key = k

                # Handle weird API parameters like `dns.proxy.__iter__` by
                # using a map provided by the module developer
                class_attr = getattr(type(self), map_key, None)
                if isinstance(class_attr, property):
                    # There is a mapped value for the api_map key
                    if class_attr.fset is None:
                        # If the mapped value does not have
                        # an associated setter
                        self._values[map_key] = v
                    else:
                        # The mapped value has a setter
                        setattr(self, map_key, v)
                else:
                    # If the mapped value is not a @property
                    self._values[map_key] = v

    def to_return(self):
        """Return the returnable values with unset entries filtered out."""
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result

    def api_params(self):
        """Build the kwargs dict for a REST create/modify call."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result

    @property
    def address(self):
        # Composite "ip%route_domain/netmask" string as BIG-IP expects it.
        # NOTE(review): if route_domain is unset this yields "...%None/..." --
        # create() appears to default route_domain to 0 first; confirm no
        # other path reads this property earlier.
        address = "{0}%{1}/{2}".format(
            self.ip, self.route_domain, self.netmask
        )
        return address

    @address.setter
    def address(self, value):
        # Only the bare IP is stored; netmask/route_domain arrive via their
        # own module options.
        self._values['ip'] = value

    @property
    def ip(self):
        # Validated, canonicalized IP string, or None when unset.
        if self._values['ip'] is None:
            return None
        try:
            ip = str(IPAddress(self._values['ip']))
            return ip
        except AddrFormatError:
            raise F5ModuleError(
                'The provided address is not a valid IP address'
            )

    @property
    def traffic_group(self):
        if self._values['traffic_group'] is None:
            return None
        return self._fqdn_name(self._values['traffic_group'])

    @property
    def route_domain(self):
        if self._values['route_domain'] is None:
            return None
        result = int(self._values['route_domain'])
        return result

    @property
    def netmask(self):
        """Normalize the netmask option to a CIDR prefix length (int)."""
        if self._values['netmask'] is None:
            return None
        # Check if numeric
        if isinstance(self._values['netmask'], int):
            result = int(self._values['netmask'])
            # NOTE(review): accepts 1-255, but valid prefix lengths top out
            # at 128 (IPv6) -- confirm whether 129-255 should be rejected.
            if 0 < result < 256:
                return result
            raise F5ModuleError(
                'The provided netmask {0} is neither in IP or CIDR format'.format(result)
            )
        else:
            try:
                # IPv4 netmask
                address = '0.0.0.0/' + self._values['netmask']
                ip = IPNetwork(address)
            except AddrFormatError as ex:
                try:
                    # IPv6 netmask
                    address = '::/' + self._values['netmask']
                    ip = IPNetwork(address)
                except AddrFormatError as ex:
                    raise F5ModuleError(
                        'The provided netmask {0} is neither in IP or CIDR format'.format(self._values['netmask'])
                    )
            result = int(ip.prefixlen)
            return result

    @property
    def allow_service(self):
        """Verifies that a supplied service string has correct format

        The string format for port lockdown is PROTOCOL:PORT. This method
        will verify that the provided input matches the allowed protocols
        and the port ranges before submitting to BIG-IP.

        The only allowed exceptions to this rule are the following values

          * all
          * default
          * none

        These are special cases that are handled differently in the API.
        "all" is set as a string, "default" is set as a one item list, and
        "none" removes the key entirely from the REST API.

        :raises F5ModuleError:
        """
        if self._values['allow_service'] is None:
            return None
        result = []
        allowed_protocols = [
            'eigrp', 'egp', 'gre', 'icmp', 'igmp', 'igp', 'ipip',
            'l2tp', 'ospf', 'pim', 'tcp', 'udp'
        ]
        special_protocols = [
            'all', 'none', 'default'
        ]
        for svc in self._values['allow_service']:
            if svc in special_protocols:
                # A special value wins over anything else in the list.
                result = [svc]
                break
            elif svc in allowed_protocols:
                # A bare protocol means "port 0" (all ports) on BIG-IP.
                full_service = '{0}:0'.format(svc)
                result.append(full_service)
            else:
                tmp = svc.split(':')
                if tmp[0] not in allowed_protocols:
                    raise F5ModuleError(
                        "The provided protocol '%s' is invalid" % (tmp[0])
                    )
                try:
                    port = int(tmp[1])
                except Exception:
                    raise F5ModuleError(
                        "The provided port '%s' is not a number" % (tmp[1])
                    )

                if port < 0 or port > 65535:
                    raise F5ModuleError(
                        "The provided port '{0}' must be between 0 and 65535".format(port)
                    )
                else:
                    result.append(svc)
        # Set semantics: duplicates and ordering never register as changes.
        return set(result)

    def _fqdn_name(self, value):
        # Qualify a bare object name with the partition ("/Common/name").
        if value is not None and not value.startswith('/'):
            return '/{0}/{1}'.format(self.partition, value)
        return value

    @property
    def vlan(self):
        if self._values['vlan'] is None:
            return None
        return self._fqdn_name(self._values['vlan'])
class ApiParameters(Parameters):
    """Parameters as read from, or destined for, the REST API directly."""

    # Device payloads already use REST naming; no remapping needed.
    api_map = {}

    @property
    def address(self):
        # Rebuild the composite address only from the pieces that are set.
        if self.ip and self.route_domain and self.netmask:
            return '{0}%{1}/{2}'.format(self.ip, self.route_domain, self.netmask)
        elif self.ip and self.netmask:
            return '{0}/{1}'.format(self.ip, self.netmask)

    @address.setter
    def address(self, value):
        # Split a device-side "ip[%route_domain]/prefix" string into its
        # components.
        pattern = '^(?P<ip>[0-9A-Fa-f:.]+)%?(?P<rd>\d+)?\/(?P<nm>\d+)$'
        matches = re.match(pattern, value)
        if not matches:
            raise F5ModuleError(
                "The specified address is malformed. Please see documentation."
            )
        try:
            ip = matches.group('ip')
            self._values['ip'] = str(IPAddress(ip))
        except AddrFormatError:
            raise F5ModuleError(
                'The provided address is not a valid IP address'
            )
        self._values['route_domain'] = matches.group('rd')
        self._values['netmask'] = matches.group('nm')

    @property
    def allow_service(self):
        # Device values are taken verbatim; no validation on read.
        return self._values['allow_service']

    @property
    def trafficGroup(self):
        # REST-cased alias for traffic_group.
        return self.traffic_group

    @trafficGroup.setter
    def trafficGroup(self, value):
        self._values['traffic_group'] = value

    @property
    def allowService(self):
        # REST-cased alias for allow_service.
        return self._values['allow_service']

    @allowService.setter
    def allowService(self, value):
        # Store as a set so ordering differences never register as changes.
        if value == 'all':
            self._values['allow_service'] = set(['all'])
        else:
            self._values['allow_service'] = set([str(x) for x in value])
class ModuleManager(object):
    """Drives the create/update/delete workflow for a BIG-IP Self IP.

    ``want`` holds the user-supplied parameters, ``have`` the state read
    from the device, and ``changes`` the delta that is pushed to the device
    and reported back to Ansible.
    """

    def __init__(self, client):
        self.client = client
        self.have = None  # populated from the device in update()
        self.want = Parameters(self.client.module.params)
        self.changes = ApiParameters()

    def _set_changed_options(self):
        # Seed `changes` with every user-supplied returnable (used when
        # there is no device state to diff against).
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(changed)

    def _update_changed_options(self):
        """Diff want vs have; return True when something must change."""
        diff = Difference(self.want, self.have)
        updatables = ApiParameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # netmask/route_domain changes are folded into a composite
                # `address`, which is how the REST API accepts them.
                if k in ['netmask', 'route_domain']:
                    changed['address'] = change
                else:
                    changed[k] = change
        if changed:
            self.changes = ApiParameters(changed)
            return True
        return False

    def exec_module(self):
        """Entry point: apply `state` and return the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result

    def present(self):
        if self.exists():
            changed = self.update()
        else:
            changed = self.create()
        return changed

    def absent(self):
        changed = False
        if self.exists():
            changed = self.remove()
        return changed

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def read_current_from_device(self):
        """Load the Self IP from the device into an ApiParameters set."""
        resource = self.client.api.tm.net.selfips.selfip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        params = ApiParameters(result)
        return params

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True

    def update_on_device(self):
        params = self.changes.api_params()
        resource = self.client.api.tm.net.selfips.selfip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.modify(**params)

    def create(self):
        # Address, netmask and VLAN are mandatory for creation.
        if self.want.address is None or self.want.netmask is None:
            raise F5ModuleError(
                'An address and a netmask must be specified'
            )
        if self.want.vlan is None:
            raise F5ModuleError(
                'A VLAN name must be specified'
            )
        if self.want.traffic_group is None:
            self.want.update({'traffic_group': '/Common/traffic-group-local-only'})
        if self.want.route_domain is None:
            self.want.update({'route_domain': 0})
        # BUG FIX: check mode is tracked on the client, not on the parameter
        # set. The previous `self.want.check_mode` was always None, so check
        # mode was silently ignored during creation, while update() and
        # remove() already consulted `self.client.check_mode`.
        if self.client.check_mode:
            return True
        self.create_on_device()
        if self.exists():
            return True
        else:
            raise F5ModuleError("Failed to create the Self IP")

    def create_on_device(self):
        # NOTE(review): `self.changes` is never seeded via
        # _set_changed_options() on the create path, so `params` here appears
        # to be empty and device-side defaults apply -- confirm intended.
        params = self.changes.api_params()
        self.client.api.tm.net.selfips.selfip.create(
            name=self.want.name,
            partition=self.want.partition,
            address=self.want.address,
            vlan=self.want.vlan,
            **params
        )

    def remove(self):
        if self.client.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the Self IP")
        return True

    def remove_from_device(self):
        resource = self.client.api.tm.net.selfips.selfip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.delete()

    def exists(self):
        """Return True when the Self IP exists on the device."""
        result = self.client.api.tm.net.selfips.selfip.exists(
            name=self.want.name,
            partition=self.want.partition
        )
        return result
class Difference(object):
    """Computes which desired (want) values differ from device (have) state.

    ``compare(param)`` first looks for a property on this class with the
    parameter's name (for parameters that need special comparison rules)
    and falls back to simple inequality otherwise. A return value of None
    means "no change"; any other value is what should be pushed.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic comparison: report the wanted value when it differs, or
        # when `have` lacks the attribute entirely.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1

    @property
    def allow_service(self):
        """Returns services formatted for consumption by f5-sdk update

        The BIG-IP endpoint for services takes different values depending on
        what you want the "allowed services" to be. It can be any of the
        following

            - a list containing "protocol:port" values
            - the string "all"
            - a null value, or None

        This is a convenience function to massage the values the user has
        supplied so that they are formatted in such a way that BIG-IP will
        accept them and apply the specified policy.

        :return:
        """
        if self.want.allow_service is None:
            return None
        result = list(self.want.allow_service)
        if self.want.allow_service == self.have.allow_service:
            return None
        elif result[0] == 'none' and self.have.allow_service is None:
            return None
        elif result[0] == 'all':
            return 'all'
        elif result[0] == 'none':
            return []
        else:
            return list(result)

    @property
    def netmask(self):
        if self.want.netmask is None:
            return None
        try:
            # Rebuild the full address strings and compare those, since the
            # API only accepts netmask changes via the composite address.
            address = IPNetwork(self.have.ip)
            if self.want.route_domain is not None:
                nipnet = "{0}%{1}/{2}".format(address.ip, self.want.route_domain, self.want.netmask)
                cipnet = "{0}%{1}/{2}".format(address.ip, self.want.route_domain, self.have.netmask)
            elif self.have.route_domain is not None:
                nipnet = "{0}%{1}/{2}".format(address.ip, self.have.route_domain, self.want.netmask)
                cipnet = "{0}%{1}/{2}".format(address.ip, self.have.route_domain, self.have.netmask)
            else:
                nipnet = "{0}/{1}".format(address.ip, self.want.netmask)
                cipnet = "{0}/{1}".format(address.ip, self.have.netmask)
            if nipnet != cipnet:
                return nipnet
        except AddrFormatError:
            raise F5ModuleError(
                'The provided address/netmask value "{0}" was invalid'.format(self.have.ip)
            )

    @property
    def route_domain(self):
        if self.want.route_domain is None:
            return None
        try:
            address = IPNetwork(self.have.ip)
            if self.want.netmask is not None:
                nipnet = "{0}%{1}/{2}".format(address.ip, self.want.route_domain, self.want.netmask)
                cipnet = "{0}%{1}/{2}".format(address.ip, self.have.route_domain, self.want.netmask)
            elif self.have.netmask is not None:
                nipnet = "{0}%{1}/{2}".format(address.ip, self.want.route_domain, self.have.netmask)
                cipnet = "{0}%{1}/{2}".format(address.ip, self.have.route_domain, self.have.netmask)
            else:
                # BUG FIX: previously, when neither want nor have carried a
                # netmask, nipnet/cipnet were never assigned and the
                # comparison below raised UnboundLocalError. Without a
                # netmask there is no address string to rebuild, so report
                # no change.
                return None
            if nipnet != cipnet:
                return nipnet
        except AddrFormatError:
            raise F5ModuleError(
                'The provided address/netmask value was invalid'
            )

    @property
    def traffic_group(self):
        if self.want.traffic_group == self.have.traffic_group:
            return None
        return self.want.traffic_group
class ArgumentSpec(object):
    """Declares the Ansible argument spec for the bigip_selfip module."""

    def __init__(self):
        self.supports_check_mode = True
        # Plain string options carry an empty spec; `name` is mandatory and
        # `allow_service` is a list of protocol:port entries.
        self.argument_spec = {
            'address': {},
            'allow_service': {'type': 'list'},
            'name': {'required': True},
            'netmask': {},
            'traffic_group': {},
            'vlan': {},
            'route_domain': {}
        }
        self.f5_product_name = 'bigip'
def main():
    """Module entry point: verify dependencies, build the F5 client, run the
    module manager, and exit with either results or a failure message.
    """
    # Both libraries are hard requirements; fail early if either is missing.
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
    if not HAS_NETADDR:
        raise F5ModuleError("The python netaddr module is required")
    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )
    try:
        mm = ModuleManager(client)
        results = mm.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        # Translate module-level errors into an Ansible failure result.
        client.module.fail_json(msg=str(e))
if __name__ == '__main__':
    main()
| |
from __future__ import absolute_import
from django.contrib.auth.decorators import login_required
from django.contrib.messages.views import SuccessMessageMixin
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import generic
from .test_forms import AuthorForm, ContactForm
from .models import Artist, Author, Book, Page, BookSigning
class CustomTemplateView(generic.TemplateView):
    """TemplateView that injects one extra entry into the template context."""
    template_name = 'generic_views/about.html'

    def get_context_data(self, **kwargs):
        # Start from the default context, then add our extra entry.
        context = super(CustomTemplateView, self).get_context_data(**kwargs)
        context['key'] = 'value'
        return context
class ObjectDetail(generic.DetailView):
    # DetailView whose object is a plain dict rather than a model instance.
    template_name = 'generic_views/detail.html'
    def get_object(self):
        """Return a non-model object to exercise DetailView rendering."""
        return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
    # Standard queryset-backed DetailView.
    queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
    queryset = Author.objects.all()
class PageDetail(generic.DetailView):
    # The template to render is read from the object's 'template' field.
    queryset = Page.objects.all()
    template_name_field = 'template'
class DictList(generic.ListView):
    """A ListView that doesn't use a model."""
    # A static list of dicts stands in for a queryset.
    queryset = [
        {'first': 'John', 'last': 'Lennon'},
        {'first': 'Yoko', 'last': 'Ono'}
    ]
    template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
    template_name = 'generic_views/list.html'
    queryset = Artist.objects.all()
class AuthorList(generic.ListView):
    queryset = Author.objects.all()
class CustomPaginator(Paginator):
    # Paginator that deliberately overrides 'orphans' to 2, ignoring the
    # caller's value; used to verify custom paginators are honoured.
    def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        super(CustomPaginator, self).__init__(
            queryset,
            page_size,
            orphans=2,
            allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
    paginate_by = 5
    # Same deliberate 'orphans=2' override, applied via get_paginator().
    def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        return super(AuthorListCustomPaginator, self).get_paginator(
            queryset,
            page_size,
            orphans=2,
            allow_empty_first_page=allow_empty_first_page)
class ContactView(generic.FormView):
    form_class = ContactForm
    success_url = reverse_lazy('authors_list')
    template_name = 'generic_views/form.html'
class ArtistCreate(generic.CreateView):
    model = Artist
class NaiveAuthorCreate(generic.CreateView):
    # 'Naive' because it defines no success_url.
    queryset = Author.objects.all()
class AuthorCreate(generic.CreateView):
    model = Author
    success_url = '/list/authors/'
class SpecializedAuthorCreate(generic.CreateView):
    model = Author
    form_class = AuthorForm
    template_name = 'generic_views/form.html'
    context_object_name = 'thingy'
    def get_success_url(self):
        # Redirect to the detail page of the author just created.
        return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
    # Wrap the inherited post() so only logged-in users may create authors.
    post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
    model = Artist
class NaiveAuthorUpdate(generic.UpdateView):
    # 'Naive' because it defines no success_url.
    queryset = Author.objects.all()
class AuthorUpdate(generic.UpdateView):
    model = Author
    success_url = '/list/authors/'
class OneAuthorUpdate(generic.UpdateView):
    # Always edits the author with pk=1 instead of reading it from the URL.
    success_url = '/list/authors/'
    def get_object(self):
        return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
    model = Author
    form_class = AuthorForm
    template_name = 'generic_views/form.html'
    context_object_name = 'thingy'
    def get_success_url(self):
        # Redirect to the detail page of the author just edited.
        return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
    # 'Naive' because it defines no success_url.
    queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
    model = Author
    success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
    queryset = Author.objects.all()
    template_name = 'generic_views/confirm_delete.html'
    context_object_name = 'thingy'
    def get_success_url(self):
        return reverse('authors_list')
class BookConfig(object):
    # Shared configuration mixin for all Book date-based views below.
    queryset = Book.objects.all()
    date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
    pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
    pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
    pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
    pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
    pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
    pass
class BookDetail(BookConfig, generic.DateDetailView):
    pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
    # Supplies the queryset via get_queryset() instead of a class attribute.
    def get_queryset(self):
        return Author.objects.all()
class BookDetailGetObjectCustomQueryset(BookDetail):
    def get_object(self, queryset=None):
        # Force the lookup against a narrowed queryset (only book pk=2).
        return super(BookDetailGetObjectCustomQueryset,self).get_object(
            queryset=Book.objects.filter(pk=2))
class CustomContextView(generic.detail.SingleObjectMixin, generic.View):
    model = Book
    object = Book(name='dummy')
    def get_object(self):
        return Book(name="dummy")
    def get_context_data(self, **kwargs):
        # Extra context entry merged in before delegating to the mixin.
        context = {'custom_key': 'custom_value'}
        context.update(kwargs)
        return super(CustomContextView, self).get_context_data(**context)
    def get_context_object_name(self, obj):
        # Fixed context name regardless of the object passed in.
        return "test_name"
class BookSigningConfig(object):
    # Shared configuration mixin for all BookSigning date-based views below.
    model = BookSigning
    date_field = 'event_date'
    # use the same templates as for books
    def get_template_names(self):
        return ['generic_views/book%s.html' % self.template_name_suffix]
class BookSigningArchive(BookSigningConfig, generic.ArchiveIndexView):
    pass
class BookSigningYearArchive(BookSigningConfig, generic.YearArchiveView):
    pass
class BookSigningMonthArchive(BookSigningConfig, generic.MonthArchiveView):
    pass
class BookSigningWeekArchive(BookSigningConfig, generic.WeekArchiveView):
    pass
class BookSigningDayArchive(BookSigningConfig, generic.DayArchiveView):
    pass
class BookSigningTodayArchive(BookSigningConfig, generic.TodayArchiveView):
    pass
class BookSigningDetail(BookSigningConfig, generic.DateDetailView):
    context_object_name = 'book'
class NonModel(object):
    # Minimal stand-in for a model: has an id and a (null) _meta attribute.
    id = "non_model_1"
    _meta = None
class NonModelDetail(generic.DetailView):
    # DetailView over the non-model class above.
    template_name = 'generic_views/detail.html'
    model = NonModel
    def get_object(self, queryset=None):
        return NonModel()
| |
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
import json
import os
from test_framework.messages import (
sha256,
tx_from_hex,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class DecodeScriptTest(BitcoinTestFramework):
    """Functional test exercising the decodescript and decoderawtransaction RPCs."""
    def set_test_params(self):
        # A single fresh node is enough; no pre-mined chain is needed.
        self.setup_clean_chain = True
        self.num_nodes = 1
    def decodescript_script_sig(self):
        """Decode the scriptSigs of all standard transaction types."""
        signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
        # '48' is the push opcode for the 0x48-byte signature.
        push_signature = '48' + signature
        public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
        # '21' is the push opcode for the 0x21-byte compressed pubkey.
        push_public_key = '21' + public_key
        # below are test cases for all of the standard transaction types
        self.log.info("- P2PK")
        # the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
        rpc_result = self.nodes[0].decodescript(push_signature)
        assert_equal(signature, rpc_result['asm'])
        self.log.info("- P2PKH")
        rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
        assert_equal(signature + ' ' + public_key, rpc_result['asm'])
        self.log.info("- multisig")
        # this also tests the leading portion of a P2SH multisig scriptSig
        # OP_0 <A sig> <B sig>
        rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
        assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
        self.log.info("- P2SH")
        # an empty P2SH redeemScript is valid and makes for a very simple test case.
        # thus, such a spending scriptSig would just need to pass the outer redeemScript
        # hash test and leave true on the top of the stack.
        rpc_result = self.nodes[0].decodescript('5100')
        assert_equal('1 0', rpc_result['asm'])
        # null data scriptSig - no such thing because null data scripts cannot be spent.
        # thus, no test case for that standard transaction type is here.
    def decodescript_script_pub_key(self):
        """Decode the scriptPubKeys of all standard transaction types,
        including the implied segwit equivalents where they exist.
        """
        public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
        # '21' and '41' are the push opcodes for 33- and 65-byte items.
        push_public_key = '21' + public_key
        public_key_hash = '5dd1d3a048119c27b28293056724d9522f26d945'
        push_public_key_hash = '14' + public_key_hash
        uncompressed_public_key = '04b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb25e01fc8fde47c96c98a4f3a8123e33a38a50cf9025cc8c4494a518f991792bb7'
        push_uncompressed_public_key = '41' + uncompressed_public_key
        p2wsh_p2pk_script_hash = 'd8590cf8ea0674cf3d49fd7ca249b85ef7485dea62c138468bddeb20cd6519f7'
        # below are test cases for all of the standard transaction types
        self.log.info("- P2PK")
        # <pubkey> OP_CHECKSIG
        rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
        assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
        assert_equal('pubkey', rpc_result['type'])
        # P2PK is translated to P2WPKH
        assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
        self.log.info("- P2PKH")
        # OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
        rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
        assert_equal('pubkeyhash', rpc_result['type'])
        assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
        # P2PKH is translated to P2WPKH
        assert_equal('witness_v0_keyhash', rpc_result['segwit']['type'])
        assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
        self.log.info("- multisig")
        # <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
        # just imagine that the pub keys used below are different.
        # for our purposes here it does not matter that they are the same even though it is unrealistic.
        multisig_script = '52' + push_public_key + push_public_key + push_public_key + '53ae'
        rpc_result = self.nodes[0].decodescript(multisig_script)
        assert_equal('multisig', rpc_result['type'])
        assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
        # multisig in P2WSH
        multisig_script_hash = sha256(bytes.fromhex(multisig_script)).hex()
        assert_equal('witness_v0_scripthash', rpc_result['segwit']['type'])
        assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])
        self.log.info ("- P2SH")
        # OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
        # push_public_key_hash here should actually be the hash of a redeem script.
        # but this works the same for purposes of this test.
        rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
        assert_equal('scripthash', rpc_result['type'])
        assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
        # P2SH does not work in segwit scripts. decodescript should not return a result for it.
        assert 'segwit' not in rpc_result
        self.log.info("- null data")
        # use a signature look-alike here to make sure that we do not decode random data as a signature.
        # this matters if/when signature sighash decoding comes along.
        # would want to make sure that no such decoding takes place in this case.
        signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
        # OP_RETURN <data>
        rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
        assert_equal('nulldata', rpc_result['type'])
        assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
        self.log.info("- CLTV redeem script")
        # redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
        # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
        # just imagine that the pub keys used below are different.
        # for our purposes here it does not matter that they are the same even though it is unrealistic.
        #
        # OP_IF
        #   <receiver-pubkey> OP_CHECKSIGVERIFY
        # OP_ELSE
        #   <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
        # OP_ENDIF
        # <sender-pubkey> OP_CHECKSIG
        #
        # lock until block 500,000
        cltv_script = '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac'
        rpc_result = self.nodes[0].decodescript(cltv_script)
        assert_equal('nonstandard', rpc_result['type'])
        assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
        # CLTV script in P2WSH
        cltv_script_hash = sha256(bytes.fromhex(cltv_script)).hex()
        assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])
        self.log.info("- P2PK with uncompressed pubkey")
        # <pubkey> OP_CHECKSIG
        rpc_result = self.nodes[0].decodescript(push_uncompressed_public_key + 'ac')
        assert_equal('pubkey', rpc_result['type'])
        assert_equal(uncompressed_public_key + ' OP_CHECKSIG', rpc_result['asm'])
        # uncompressed pubkeys are invalid for checksigs in segwit scripts.
        # decodescript should not return a P2WPKH equivalent.
        assert 'segwit' not in rpc_result
        self.log.info("- multisig with uncompressed pubkey")
        # <m> <A pubkey> <B pubkey> <n> OP_CHECKMULTISIG
        # just imagine that the pub keys used below are different.
        # the purpose of this test is to check that a segwit script is not returned for bare multisig scripts
        # with an uncompressed pubkey in them.
        rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_uncompressed_public_key +'52ae')
        assert_equal('multisig', rpc_result['type'])
        assert_equal('2 ' + public_key + ' ' + uncompressed_public_key + ' 2 OP_CHECKMULTISIG', rpc_result['asm'])
        # uncompressed pubkeys are invalid for checksigs in segwit scripts.
        # decodescript should not return a P2WPKH equivalent.
        assert 'segwit' not in rpc_result
        self.log.info("- P2WPKH")
        # 0 <PubKeyHash>
        rpc_result = self.nodes[0].decodescript('00' + push_public_key_hash)
        assert_equal('witness_v0_keyhash', rpc_result['type'])
        assert_equal('0 ' + public_key_hash, rpc_result['asm'])
        # segwit scripts do not work nested into each other.
        # a nested segwit script should not be returned in the results.
        assert 'segwit' not in rpc_result
        self.log.info("- P2WSH")
        # 0 <ScriptHash>
        # even though this hash is of a P2PK script which is better used as bare P2WPKH, it should not matter
        # for the purpose of this test.
        rpc_result = self.nodes[0].decodescript('0020' + p2wsh_p2pk_script_hash)
        assert_equal('witness_v0_scripthash', rpc_result['type'])
        assert_equal('0 ' + p2wsh_p2pk_script_hash, rpc_result['asm'])
        # segwit scripts do not work nested into each other.
        # a nested segwit script should not be returned in the results.
        assert 'segwit' not in rpc_result
        self.log.info("- P2TR")
        # 1 <x-only pubkey>
        xonly_public_key = '01'*32  # first ever P2TR output on mainnet
        rpc_result = self.nodes[0].decodescript('5120' + xonly_public_key)
        assert_equal('witness_v1_taproot', rpc_result['type'])
        assert_equal('1 ' + xonly_public_key, rpc_result['asm'])
        assert 'segwit' not in rpc_result
    def decoderawtransaction_asm_sighashtype(self):
        """Test decoding scripts via RPC command "decoderawtransaction".

        This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
        """
        self.log.info("- various mainnet txs")
        # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
        tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
        # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
        # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
        # verify that we have not altered scriptPubKey decoding.
        tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
        assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
        assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
        assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
        # keep a parsed copy of this tx around; its scriptSig is replaced below.
        txSave = tx_from_hex(tx)
        self.log.info("- tx not passing DER signature checks")
        # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
        tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
        self.log.info("- tx passing DER signature checks")
        # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
        tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
        assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
        # some more full transaction tests of varying specific scriptSigs. used instead of
        # tests in decodescript_script_sig because the decodescript RPC is specifically
        # for working on scriptPubKeys (argh!).
        push_signature = txSave.vin[0].scriptSig.hex()[2:(0x48*2+4)]
        signature = push_signature[2:]
        der_signature = signature[:-2]
        # rebuild the signature with different trailing sighash-type bytes.
        signature_sighash_decoded = der_signature + '[ALL]'
        signature_2 = der_signature + '82'
        push_signature_2 = '48' + signature_2
        signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
        self.log.info("- P2PK scriptSig")
        txSave.vin[0].scriptSig = bytes.fromhex(push_signature)
        rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
        assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
        # make sure that the sighash decodes come out correctly for a more complex / lesser used case.
        txSave.vin[0].scriptSig = bytes.fromhex(push_signature_2)
        rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
        assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
        self.log.info("- multisig scriptSig")
        txSave.vin[0].scriptSig = bytes.fromhex('00' + push_signature + push_signature_2)
        rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
        assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
        self.log.info("- scriptSig that contains more than push operations")
        # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
        txSave.vin[0].scriptSig = bytes.fromhex('6a143011020701010101010101020601010101010101')
        rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
        assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
    def decodescript_datadriven_tests(self):
        """Run the (script, expected-decode) pairs from data/rpc_decodescript.json."""
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_decodescript.json'), encoding='utf-8') as f:
            dd_tests = json.load(f)
        for script, result in dd_tests:
            rpc_result = self.nodes[0].decodescript(script)
            assert_equal(result, rpc_result)
    def run_test(self):
        """Run all decode sub-tests in order."""
        self.log.info("Test decoding of standard input scripts [scriptSig]")
        self.decodescript_script_sig()
        self.log.info("Test decoding of standard output scripts [scriptPubKey]")
        self.decodescript_script_pub_key()
        self.log.info("Test 'asm' script decoding of transactions")
        self.decoderawtransaction_asm_sighashtype()
        self.log.info("Data-driven tests")
        self.decodescript_datadriven_tests()
if __name__ == '__main__':
    DecodeScriptTest().main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the nova.db namespace. Call these
functions from nova.db namespace, not the nova.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/nova/nova.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
from nova.openstack.common.db import api as db_api
from nova.openstack.common import log as logging
# Options governing DB-driven naming templates and new-service behaviour.
db_opts = [
    cfg.BoolOpt('enable_new_services',
                default=True,
                help='Services to be added to the available pool on create'),
    cfg.StrOpt('instance_name_template',
                default='instance-%08x',
                help='Template string to be used to generate instance names'),
    cfg.StrOpt('snapshot_name_template',
                default='snapshot-%s',
                help='Template string to be used to generate snapshot names'),
]
# Register the options on the global configuration object.
CONF = cfg.CONF
CONF.register_opts(db_opts)
# Map of backend names to implementing modules; 'sqlalchemy' is the only
# supported backend right now (see module docstring).
_BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'}
# IMPL lazily loads the configured backend and proxies every call below.
IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__)
class NoMoreNetworks(exception.NovaException):
    """No more available networks."""
    pass
class NoMoreTargets(exception.NovaException):
    """No more available targets."""
    pass
###################
def constraint(**conditions):
    """Return a constraint object suitable for use with some updates.

    :param conditions: mapping of attribute name to a condition object,
        e.g. as produced by :func:`equal_any` or :func:`not_equal`.
    """
    return IMPL.constraint(**conditions)
def equal_any(*values):
    """Return an equality condition object suitable for use in a constraint.

    Equal_any conditions require that a model object's attribute equal any
    one of the given values.
    """
    return IMPL.equal_any(*values)
def not_equal(*values):
    """Return an inequality condition object suitable for use in a constraint.

    Not_equal conditions require that a model object's attribute differs from
    all of the given values.
    """
    return IMPL.not_equal(*values)
###################
def service_destroy(context, instance_id):
    """Destroy the service or raise if it does not exist.

    NOTE(review): the parameter is named 'instance_id' but is forwarded to
    the backend as the service identifier (compare :func:`service_get`) —
    the name looks historical; confirm before renaming, since keyword
    callers would break.
    """
    return IMPL.service_destroy(context, instance_id)
def service_get(context, service_id):
    """Get a service or raise if it does not exist."""
    return IMPL.service_get(context, service_id)
def service_get_by_host_and_topic(context, host, topic):
    """Get a service by host it's on and topic it listens to."""
    return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
    """Get all services, optionally filtered by the 'disabled' flag."""
    return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
    """Get all services for a given topic."""
    return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_by_host(context, host):
    """Get all services for a given host."""
    return IMPL.service_get_all_by_host(context, host)
def service_get_by_compute_host(context, host):
    """Get the service entry for a given compute host.

    Returns the service entry joined with the compute_node entry.
    """
    return IMPL.service_get_by_compute_host(context, host)
def service_get_by_args(context, host, binary):
    """Get the state of a service by node name and binary."""
    return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)
def service_update(context, service_id, values):
    """Set the given properties on a service and update it.

    Raises NotFound if service does not exist.
    """
    return IMPL.service_update(context, service_id, values)
###################
def compute_node_get(context, compute_id):
    """Get a computeNode."""
    return IMPL.compute_node_get(context, compute_id)
def compute_node_get_all(context):
    """Get all computeNodes."""
    return IMPL.compute_node_get_all(context)
def compute_node_search_by_hypervisor(context, hypervisor_match):
    """Get computeNodes given a hypervisor hostname match string."""
    return IMPL.compute_node_search_by_hypervisor(context, hypervisor_match)
def compute_node_create(context, values):
    """Create a computeNode from the values dictionary."""
    return IMPL.compute_node_create(context, values)
def compute_node_update(context, compute_id, values, prune_stats=False):
    """Set the given properties on a computeNode and update it.

    Raises ComputeHostNotFound if computeNode does not exist.
    """
    return IMPL.compute_node_update(context, compute_id, values, prune_stats)
def compute_node_delete(context, compute_id):
    """Delete a computeNode from the database.

    Raises ComputeHostNotFound if computeNode does not exist.
    """
    return IMPL.compute_node_delete(context, compute_id)
def compute_node_statistics(context):
    """Return statistics over the compute nodes (delegated to the backend)."""
    return IMPL.compute_node_statistics(context)
###################
def certificate_create(context, values):
    """Create a certificate from the values dictionary."""
    return IMPL.certificate_create(context, values)
def certificate_get_all_by_project(context, project_id):
    """Get all certificates for a project."""
    return IMPL.certificate_get_all_by_project(context, project_id)
def certificate_get_all_by_user(context, user_id):
    """Get all certificates for a user."""
    return IMPL.certificate_get_all_by_user(context, user_id)
def certificate_get_all_by_user_and_project(context, user_id, project_id):
    """Get all certificates for a user and project."""
    return IMPL.certificate_get_all_by_user_and_project(context,
                                                        user_id,
                                                        project_id)
###################
def floating_ip_get(context, id):
    """Get a floating ip by id (delegates to the backend)."""
    return IMPL.floating_ip_get(context, id)
def floating_ip_get_pools(context):
    """Returns a list of floating ip pools."""
    return IMPL.floating_ip_get_pools(context)
def floating_ip_allocate_address(context, project_id, pool):
    """Allocate free floating ip from specified pool and return the address.

    Raises if one is not available.
    """
    return IMPL.floating_ip_allocate_address(context, project_id, pool)
def floating_ip_bulk_create(context, ips):
    """Create a lot of floating ips from the values dictionary."""
    return IMPL.floating_ip_bulk_create(context, ips)
def floating_ip_bulk_destroy(context, ips):
    """Destroy a lot of floating ips from the values dictionary."""
    return IMPL.floating_ip_bulk_destroy(context, ips)
def floating_ip_create(context, values):
    """Create a floating ip from the values dictionary."""
    return IMPL.floating_ip_create(context, values)
def floating_ip_count_by_project(context, project_id, session=None):
    """Count floating ips used by project."""
    return IMPL.floating_ip_count_by_project(context, project_id,
                                             session=session)
def floating_ip_deallocate(context, address):
    """Deallocate a floating ip by address."""
    return IMPL.floating_ip_deallocate(context, address)
def floating_ip_destroy(context, address):
    """Destroy the floating_ip or raise if it does not exist."""
    return IMPL.floating_ip_destroy(context, address)
def floating_ip_disassociate(context, address):
    """Disassociate a floating ip from a fixed ip by address.

    :returns: the fixed ip record joined to network record or None
              if the ip was not associated to an ip.
    """
    return IMPL.floating_ip_disassociate(context, address)
def floating_ip_fixed_ip_associate(context, floating_address,
                                   fixed_address, host):
    """Associate a floating ip to a fixed_ip by address.

    :returns: the fixed ip record joined to network record or None
              if the ip was already associated to the fixed ip.
    """
    return IMPL.floating_ip_fixed_ip_associate(context,
                                               floating_address,
                                               fixed_address,
                                               host)
def floating_ip_get_all(context):
    """Get all floating ips."""
    return IMPL.floating_ip_get_all(context)
def floating_ip_get_all_by_host(context, host):
    """Get all floating ips by host."""
    return IMPL.floating_ip_get_all_by_host(context, host)
def floating_ip_get_all_by_project(context, project_id):
    """Get all floating ips by project."""
    return IMPL.floating_ip_get_all_by_project(context, project_id)
def floating_ip_get_by_address(context, address):
    """Get a floating ip by address or raise if it doesn't exist."""
    return IMPL.floating_ip_get_by_address(context, address)
def floating_ip_get_by_fixed_address(context, fixed_address):
    """Get floating ips by fixed address."""
    return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
    """Get floating ips by fixed ip id."""
    return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
def floating_ip_update(context, address, values):
    """Update a floating ip by address or raise if it doesn't exist."""
    return IMPL.floating_ip_update(context, address, values)
def floating_ip_set_auto_assigned(context, address):
    """Set auto_assigned flag to floating ip."""
    return IMPL.floating_ip_set_auto_assigned(context, address)
def dnsdomain_list(context):
    """Get a list of all zones in our database, public and private."""
    return IMPL.dnsdomain_list(context)
def dnsdomain_register_for_zone(context, fqdomain, zone):
    """Associate a DNS domain with an availability zone."""
    return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
def dnsdomain_register_for_project(context, fqdomain, project):
    """Associate a DNS domain with a project id."""
    return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
def dnsdomain_unregister(context, fqdomain):
    """Purge associations for the specified DNS zone."""
    return IMPL.dnsdomain_unregister(context, fqdomain)
def dnsdomain_get(context, fqdomain):
    """Get the db record for the specified domain."""
    return IMPL.dnsdomain_get(context, fqdomain)
####################
# Migration record operations: thin pass-throughs to the backend (IMPL).
def migration_update(context, id, values):
    """Update a migration instance."""
    return IMPL.migration_update(context, id, values)
def migration_create(context, values):
    """Create a migration record."""
    return IMPL.migration_create(context, values)
def migration_get(context, migration_id):
    """Finds a migration by the id."""
    return IMPL.migration_get(context, migration_id)
def migration_get_by_instance_and_status(context, instance_uuid, status):
    """Finds a migration by the uuid of the instance it is migrating."""
    return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
                                                     status)
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
                                              dest_compute):
    """Finds all unconfirmed migrations within the confirmation window for
    a specific destination compute host.
    """
    return IMPL.migration_get_unconfirmed_by_dest_compute(context,
            confirm_window, dest_compute)
def migration_get_in_progress_by_host_and_node(context, host, node):
    """Finds all migrations for the given host + node that are not yet
    confirmed or reverted.
    """
    return IMPL.migration_get_in_progress_by_host_and_node(context, host, node)
####################
# Fixed IP operations: thin pass-throughs to the backend (IMPL).
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
                       reserved=False):
    """Associate fixed ip to instance.
    Raises if fixed ip is not available.
    """
    return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id,
                                   reserved)
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
                            host=None):
    """Find free ip in network and associate it to instance or host.
    Raises if one is not available.
    """
    return IMPL.fixed_ip_associate_pool(context, network_id,
                                        instance_uuid, host)
def fixed_ip_create(context, values):
    """Create a fixed ip from the values dictionary."""
    return IMPL.fixed_ip_create(context, values)
def fixed_ip_bulk_create(context, ips):
    """Create a lot of fixed ips from the values dictionary."""
    return IMPL.fixed_ip_bulk_create(context, ips)
def fixed_ip_disassociate(context, address):
    """Disassociate a fixed ip from an instance by address."""
    return IMPL.fixed_ip_disassociate(context, address)
def fixed_ip_disassociate_all_by_timeout(context, host, time):
    """Disassociate old fixed ips from host."""
    return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
def fixed_ip_get(context, id, get_network=False):
    """Get fixed ip by id or raise if it does not exist.
    If get_network is true, also return the associated network.
    """
    return IMPL.fixed_ip_get(context, id, get_network)
def fixed_ip_get_all(context):
    """Get all defined fixed ips."""
    return IMPL.fixed_ip_get_all(context)
def fixed_ip_get_by_address(context, address):
    """Get a fixed ip by address or raise if it does not exist."""
    return IMPL.fixed_ip_get_by_address(context, address)
def fixed_ip_get_by_address_detailed(context, address):
    """Get detailed fixed ip info by address or raise if it does not exist."""
    return IMPL.fixed_ip_get_by_address_detailed(context, address)
def fixed_ip_get_by_floating_address(context, floating_address):
    """Get a fixed ip by a floating address."""
    return IMPL.fixed_ip_get_by_floating_address(context, floating_address)
def fixed_ip_get_by_instance(context, instance_uuid):
    """Get fixed ips by instance or raise if none exist."""
    return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
def fixed_ip_get_by_network_host(context, network_uuid, host):
    """Get fixed ip for a host in a network."""
    return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host)
def fixed_ips_by_virtual_interface(context, vif_id):
    """Get fixed ips by virtual interface or raise if none exist."""
    return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
def fixed_ip_update(context, address, values):
    """Update a fixed ip from the values dictionary."""
    return IMPL.fixed_ip_update(context, address, values)
def fixed_ip_count_by_project(context, project_id, session=None):
    """Count fixed ips used by project."""
    return IMPL.fixed_ip_count_by_project(context, project_id,
                                          session=session)
####################
# Virtual interface operations: thin pass-throughs to the backend (IMPL).
def virtual_interface_create(context, values):
    """Create a virtual interface record in the database."""
    return IMPL.virtual_interface_create(context, values)
def virtual_interface_get(context, vif_id):
    """Gets a virtual interface from the table."""
    return IMPL.virtual_interface_get(context, vif_id)
def virtual_interface_get_by_address(context, address):
    """Gets a virtual interface from the table filtering on address."""
    return IMPL.virtual_interface_get_by_address(context, address)
def virtual_interface_get_by_uuid(context, vif_uuid):
    """Gets a virtual interface from the table filtering on vif uuid."""
    return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
def virtual_interface_get_by_instance(context, instance_id):
    """Gets all virtual_interfaces for instance."""
    return IMPL.virtual_interface_get_by_instance(context, instance_id)
def virtual_interface_get_by_instance_and_network(context, instance_id,
                                                  network_id):
    """Gets the virtual interfaces for an instance on a given network."""
    return IMPL.virtual_interface_get_by_instance_and_network(context,
                                                              instance_id,
                                                              network_id)
def virtual_interface_delete_by_instance(context, instance_id):
    """Delete virtual interface records associated with instance."""
    return IMPL.virtual_interface_delete_by_instance(context, instance_id)
def virtual_interface_get_all(context):
    """Gets all virtual interfaces from the table."""
    return IMPL.virtual_interface_get_all(context)
####################
# Instance operations: thin pass-throughs to the backend (IMPL).
def instance_create(context, values):
    """Create an instance from the values dictionary."""
    return IMPL.instance_create(context, values)
def instance_data_get_for_project(context, project_id, session=None):
    """Get (instance_count, total_cores, total_ram) for project."""
    return IMPL.instance_data_get_for_project(context, project_id,
                                              session=session)
def instance_destroy(context, instance_uuid, constraint=None,
                     update_cells=True):
    """Destroy the instance or raise if it does not exist.

    When update_cells is true, the destroy is also reported to the cells
    service on a best-effort basis: a notification failure is logged but
    never propagated to the caller.
    """
    instance_ref = IMPL.instance_destroy(context, instance_uuid, constraint)
    if update_cells:
        try:
            cells_rpcapi.CellsAPI().instance_destroy_at_top(context,
                                                            instance_ref)
        except Exception:
            LOG.exception(_("Failed to notify cells of instance destroy"))
    return instance_ref
def instance_get_by_uuid(context, uuid):
    """Get an instance by uuid or raise if it does not exist."""
    return IMPL.instance_get_by_uuid(context, uuid)
def instance_get(context, instance_id):
    """Get an instance by id or raise if it does not exist."""
    return IMPL.instance_get(context, instance_id)
def instance_get_all(context, columns_to_join=None):
    """Get all instances."""
    return IMPL.instance_get_all(context, columns_to_join=columns_to_join)
def instance_get_all_by_filters(context, filters, sort_key='created_at',
                                sort_dir='desc', limit=None, marker=None,
                                columns_to_join=None):
    """Get all instances that match all filters."""
    return IMPL.instance_get_all_by_filters(context, filters, sort_key,
                                            sort_dir, limit=limit,
                                            marker=marker,
                                            columns_to_join=columns_to_join)
def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None, host=None):
    """Get instances and joins active during a certain time window.
    Specifying a project_id will filter for a certain project.
    Specifying a host will filter for instances on a given compute host.
    """
    return IMPL.instance_get_active_by_window_joined(context, begin, end,
                                                     project_id, host)
def instance_get_all_by_host(context, host, columns_to_join=None):
    """Get all instances belonging to a host."""
    return IMPL.instance_get_all_by_host(context, host, columns_to_join)
def instance_get_all_by_host_and_node(context, host, node):
    """Get all instances belonging to a node."""
    return IMPL.instance_get_all_by_host_and_node(context, host, node)
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
    """Get all instances belonging to a host with a different type_id."""
    return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
def instance_get_floating_address(context, instance_id):
    """Get the first floating ip address of an instance."""
    return IMPL.instance_get_floating_address(context, instance_id)
def instance_floating_address_get_all(context, instance_uuid):
    """Get all floating ip addresses of an instance."""
    return IMPL.instance_floating_address_get_all(context, instance_uuid)
def instance_get_all_hung_in_rebooting(context, reboot_window):
    """Get all instances stuck in a rebooting state."""
    return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
def instance_update(context, instance_uuid, values, update_cells=True):
    """Set the given properties on an instance and update it.
    Raises NotFound if instance does not exist.

    When update_cells is true, the cells service is told about the
    updated instance on a best-effort basis; a notification failure is
    logged and otherwise ignored.
    """
    instance_ref = IMPL.instance_update(context, instance_uuid, values)
    if update_cells:
        try:
            cells_rpcapi.CellsAPI().instance_update_at_top(context,
                                                           instance_ref)
        except Exception:
            LOG.exception(_("Failed to notify cells of instance update"))
    return instance_ref
def instance_update_and_get_original(context, instance_uuid, values):
    """Set the given properties on an instance and update it. Return
    a shallow copy of the original instance reference, as well as the
    updated one.

    :param context: = request context object
    :param instance_id: = instance id or uuid
    :param values: = dict containing column values
    :returns: a tuple of the form (old_instance_ref, new_instance_ref)
    Raises NotFound if instance does not exist.

    The cells service is always notified of the update on a best-effort
    basis; notification failures are logged and swallowed.
    """
    refs = IMPL.instance_update_and_get_original(context, instance_uuid,
                                                 values)
    try:
        # refs[1] is the updated (new) instance reference.
        cells_rpcapi.CellsAPI().instance_update_at_top(context, refs[1])
    except Exception:
        LOG.exception(_("Failed to notify cells of instance update"))
    return refs
def instance_add_security_group(context, instance_id, security_group_id):
    """Associate the given security group with the given instance."""
    return IMPL.instance_add_security_group(context, instance_id,
                                            security_group_id)
def instance_remove_security_group(context, instance_id, security_group_id):
    """Disassociate the given security group from the given instance."""
    return IMPL.instance_remove_security_group(context, instance_id,
                                               security_group_id)
###################
def instance_info_cache_get(context, instance_uuid):
    """Gets an instance info cache from the table.
    :param instance_uuid: = uuid of the info cache's instance
    """
    return IMPL.instance_info_cache_get(context, instance_uuid)
def instance_info_cache_update(context, instance_uuid, values,
                               update_cells=True):
    """Update an instance info cache record in the table.

    :param instance_uuid: = uuid of info cache's instance
    :param values: = dict containing column values to update

    When update_cells is true, the new cache contents are pushed to the
    cells service on a best-effort basis (failures are only logged).
    """
    info_cache = IMPL.instance_info_cache_update(context, instance_uuid,
                                                 values)
    if update_cells:
        try:
            cells_rpcapi.CellsAPI().instance_info_cache_update_at_top(
                context, info_cache)
        except Exception:
            LOG.exception(_("Failed to notify cells of instance info "
                            "cache update"))
    return info_cache
def instance_info_cache_delete(context, instance_uuid):
    """Deletes an existing instance_info_cache record
    :param instance_uuid: = uuid of the instance tied to the cache record
    """
    return IMPL.instance_info_cache_delete(context, instance_uuid)
###################
# Key pair operations: thin pass-throughs to the backend (IMPL).
def key_pair_create(context, values):
    """Create a key_pair from the values dictionary."""
    return IMPL.key_pair_create(context, values)
def key_pair_destroy(context, user_id, name):
    """Destroy the key_pair or raise if it does not exist."""
    return IMPL.key_pair_destroy(context, user_id, name)
def key_pair_get(context, user_id, name):
    """Get a key_pair or raise if it does not exist."""
    return IMPL.key_pair_get(context, user_id, name)
def key_pair_get_all_by_user(context, user_id):
    """Get all key_pairs by user."""
    return IMPL.key_pair_get_all_by_user(context, user_id)
def key_pair_count_by_user(context, user_id):
    """Count number of key pairs for the given user ID."""
    return IMPL.key_pair_count_by_user(context, user_id)
####################
# Network operations: thin pass-throughs to the backend (IMPL).
def network_associate(context, project_id, network_id=None, force=False):
    """Associate a free network to a project."""
    return IMPL.network_associate(context, project_id, network_id, force)
def network_count_reserved_ips(context, network_id):
    """Return the number of reserved ips in the network."""
    return IMPL.network_count_reserved_ips(context, network_id)
def network_create_safe(context, values):
    """Create a network from the values dict.
    The network is only returned if the create succeeds. If the create violates
    constraints because the network already exists, no exception is raised.
    """
    return IMPL.network_create_safe(context, values)
def network_delete_safe(context, network_id):
    """Delete network with key network_id.
    This method assumes that the network is not associated with any project
    """
    return IMPL.network_delete_safe(context, network_id)
def network_disassociate(context, network_id, disassociate_host=True,
                         disassociate_project=True):
    """Disassociate the network from project or host and raise if it does
    not exist."""
    return IMPL.network_disassociate(context, network_id, disassociate_host,
                                     disassociate_project)
def network_get(context, network_id, project_only="allow_none"):
    """Get a network or raise if it does not exist."""
    return IMPL.network_get(context, network_id, project_only=project_only)
def network_get_all(context):
    """Return all defined networks."""
    return IMPL.network_get_all(context)
def network_get_all_by_uuids(context, network_uuids,
                             project_only="allow_none"):
    """Return networks by ids."""
    return IMPL.network_get_all_by_uuids(context, network_uuids,
                                         project_only=project_only)
# pylint: disable=C0103
def network_in_use_on_host(context, network_id, host=None):
    """Indicates if a network is currently in use on host."""
    return IMPL.network_in_use_on_host(context, network_id, host)
def network_get_associated_fixed_ips(context, network_id, host=None):
    """Get all network's ips that have been associated."""
    return IMPL.network_get_associated_fixed_ips(context, network_id, host)
def network_get_by_uuid(context, uuid):
    """Get a network by uuid or raise if it does not exist."""
    return IMPL.network_get_by_uuid(context, uuid)
def network_get_by_cidr(context, cidr):
    """Get a network by cidr or raise if it does not exist."""
    return IMPL.network_get_by_cidr(context, cidr)
def network_get_all_by_instance(context, instance_id):
    """Get all networks by instance id or raise if none exist."""
    return IMPL.network_get_all_by_instance(context, instance_id)
def network_get_all_by_host(context, host):
    """All networks for which the given host is the network host."""
    return IMPL.network_get_all_by_host(context, host)
def network_set_host(context, network_id, host_id):
    """Safely set the host for network."""
    return IMPL.network_set_host(context, network_id, host_id)
def network_update(context, network_id, values):
    """Set the given properties on a network and update it.
    Raises NotFound if network does not exist.
    """
    return IMPL.network_update(context, network_id, values)
###############
# Quota, quota class, usage and reservation operations: thin
# pass-throughs to the backend (IMPL).
def quota_create(context, project_id, resource, limit):
    """Create a quota for the given project and resource."""
    return IMPL.quota_create(context, project_id, resource, limit)
def quota_get(context, project_id, resource):
    """Retrieve a quota or raise if it does not exist."""
    return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
    """Retrieve all quotas associated with a given project."""
    return IMPL.quota_get_all_by_project(context, project_id)
def quota_update(context, project_id, resource, limit):
    """Update a quota or raise if it does not exist."""
    return IMPL.quota_update(context, project_id, resource, limit)
###################
def quota_class_create(context, class_name, resource, limit):
    """Create a quota class for the given name and resource."""
    return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
    """Retrieve a quota class or raise if it does not exist."""
    return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_all_by_name(context, class_name):
    """Retrieve all quotas associated with a given quota class."""
    return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
    """Update a quota class or raise if it does not exist."""
    return IMPL.quota_class_update(context, class_name, resource, limit)
###################
def quota_usage_get(context, project_id, resource):
    """Retrieve a quota usage or raise if it does not exist."""
    return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
    """Retrieve all usage associated with a given project."""
    return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_update(context, project_id, resource, **kwargs):
    """Update a quota usage or raise if it does not exist."""
    return IMPL.quota_usage_update(context, project_id, resource, **kwargs)
###################
def reservation_create(context, uuid, usage, project_id, resource, delta,
                       expire):
    """Create a reservation for the given project and resource."""
    return IMPL.reservation_create(context, uuid, usage, project_id,
                                   resource, delta, expire)
def reservation_get(context, uuid):
    """Retrieve a reservation or raise if it does not exist."""
    return IMPL.reservation_get(context, uuid)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age, project_id=None):
    """Check quotas and create appropriate reservations."""
    return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
                              until_refresh, max_age, project_id=project_id)
def reservation_commit(context, reservations, project_id=None):
    """Commit quota reservations."""
    return IMPL.reservation_commit(context, reservations,
                                   project_id=project_id)
def reservation_rollback(context, reservations, project_id=None):
    """Roll back quota reservations."""
    return IMPL.reservation_rollback(context, reservations,
                                     project_id=project_id)
def quota_destroy_all_by_project(context, project_id):
    """Destroy all quotas associated with a given project."""
    return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
    """Roll back any expired reservations."""
    return IMPL.reservation_expire(context)
###################
# EC2 id <-> uuid mapping operations: thin pass-throughs to the
# backend (IMPL). Docstrings below describe the mapping implied by the
# function names; the actual lookup lives in the backend.
def get_ec2_volume_id_by_uuid(context, volume_id):
    """Get the ec2 id that maps to the given volume uuid."""
    return IMPL.get_ec2_volume_id_by_uuid(context, volume_id)
def get_volume_uuid_by_ec2_id(context, ec2_id):
    """Get the volume uuid that maps to the given ec2 id."""
    return IMPL.get_volume_uuid_by_ec2_id(context, ec2_id)
def ec2_volume_create(context, volume_id, forced_id=None):
    """Create an ec2 id mapping for the given volume uuid."""
    return IMPL.ec2_volume_create(context, volume_id, forced_id)
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
    """Get the snapshot uuid that maps to the given ec2 id."""
    return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id)
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
    """Get the ec2 id that maps to the given snapshot uuid."""
    return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id)
def ec2_snapshot_create(context, snapshot_id, forced_id=None):
    """Create an ec2 id mapping for the given snapshot uuid."""
    return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
####################
# Block device mapping operations: thin pass-throughs to the backend (IMPL).
def block_device_mapping_create(context, values):
    """Create an entry of block device mapping."""
    return IMPL.block_device_mapping_create(context, values)
def block_device_mapping_update(context, bdm_id, values):
    """Update an entry of block device mapping."""
    return IMPL.block_device_mapping_update(context, bdm_id, values)
def block_device_mapping_update_or_create(context, values):
    """Update an entry of block device mapping.
    If not existed, create a new entry"""
    return IMPL.block_device_mapping_update_or_create(context, values)
def block_device_mapping_get_all_by_instance(context, instance_uuid):
    """Get all block device mapping belonging to an instance."""
    return IMPL.block_device_mapping_get_all_by_instance(context,
                                                         instance_uuid)
def block_device_mapping_destroy(context, bdm_id):
    """Destroy the block device mapping."""
    return IMPL.block_device_mapping_destroy(context, bdm_id)
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
                                                        device_name):
    """Destroy the block device mapping matching instance and device name."""
    return IMPL.block_device_mapping_destroy_by_instance_and_device(
        context, instance_uuid, device_name)
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
                                                        volume_id):
    """Destroy the block device mapping matching instance and volume id."""
    return IMPL.block_device_mapping_destroy_by_instance_and_volume(
        context, instance_uuid, volume_id)
####################
# Security group operations: thin pass-throughs to the backend (IMPL).
def security_group_get_all(context):
    """Get all security groups."""
    return IMPL.security_group_get_all(context)
def security_group_get(context, security_group_id):
    """Get security group by its id."""
    return IMPL.security_group_get(context, security_group_id)
def security_group_get_by_name(context, project_id, group_name):
    """Returns a security group with the specified name from a project."""
    return IMPL.security_group_get_by_name(context, project_id, group_name)
def security_group_get_by_project(context, project_id):
    """Get all security groups belonging to a project."""
    return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_id):
    """Get security groups to which the instance is assigned."""
    return IMPL.security_group_get_by_instance(context, instance_id)
def security_group_exists(context, project_id, group_name):
    """Indicates if a group name exists in a project."""
    return IMPL.security_group_exists(context, project_id, group_name)
def security_group_in_use(context, group_id):
    """Indicates if a security group is currently in use."""
    return IMPL.security_group_in_use(context, group_id)
def security_group_create(context, values):
    """Create a new security group."""
    return IMPL.security_group_create(context, values)
def security_group_ensure_default(context):
    """Ensure default security group exists for a project_id.
    Returns a tuple with the first element being a bool indicating
    if the default security group previously existed. Second
    element is the dict used to create the default security group.
    """
    return IMPL.security_group_ensure_default(context)
def security_group_destroy(context, security_group_id):
    """Deletes a security group."""
    return IMPL.security_group_destroy(context, security_group_id)
def security_group_count_by_project(context, project_id, session=None):
    """Count number of security groups in a project."""
    return IMPL.security_group_count_by_project(context, project_id,
                                                session=session)
####################
# Security group rule operations: thin pass-throughs to the backend (IMPL).
def security_group_rule_create(context, values):
    """Create a new security group rule."""
    return IMPL.security_group_rule_create(context, values)
def security_group_rule_get_by_security_group(context, security_group_id):
    """Get all rules for a given security group."""
    return IMPL.security_group_rule_get_by_security_group(context,
                                                          security_group_id)
def security_group_rule_get_by_security_group_grantee(context,
                                                      security_group_id):
    """Get all rules that grant access to the given security group."""
    return IMPL.security_group_rule_get_by_security_group_grantee(context,
                                                         security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
    """Deletes a security group rule."""
    return IMPL.security_group_rule_destroy(context, security_group_rule_id)
def security_group_rule_get(context, security_group_rule_id):
    """Gets a security group rule."""
    return IMPL.security_group_rule_get(context, security_group_rule_id)
def security_group_rule_count_by_group(context, security_group_id):
    """Count rules in a given security group."""
    return IMPL.security_group_rule_count_by_group(context, security_group_id)
###################
# Default security group rule operations: thin pass-throughs to the
# backend (IMPL).
def security_group_default_rule_get(context, security_group_rule_default_id):
    """Get a default security group rule by its id."""
    return IMPL.security_group_default_rule_get(context,
                                                security_group_rule_default_id)
def security_group_default_rule_destroy(context,
                                        security_group_rule_default_id):
    """Delete a default security group rule."""
    return IMPL.security_group_default_rule_destroy(
        context, security_group_rule_default_id)
def security_group_default_rule_create(context, values):
    """Create a default security group rule from the values dictionary."""
    return IMPL.security_group_default_rule_create(context, values)
def security_group_default_rule_list(context):
    """Get all default security group rules."""
    return IMPL.security_group_default_rule_list(context)
###################
# Provider-level firewall rule operations: thin pass-throughs to the
# backend (IMPL).
def provider_fw_rule_create(context, rule):
    """Add a firewall rule at the provider level (all hosts & instances)."""
    return IMPL.provider_fw_rule_create(context, rule)
def provider_fw_rule_get_all(context):
    """Get all provider-level firewall rules."""
    return IMPL.provider_fw_rule_get_all(context)
def provider_fw_rule_destroy(context, rule_id):
    """Delete a provider firewall rule from the database."""
    return IMPL.provider_fw_rule_destroy(context, rule_id)
###################
def project_get_networks(context, project_id, associate=True):
    """Return the network associated with the project.
    If associate is true, it will attempt to associate a new
    network if one is not found, otherwise it returns None.
    """
    return IMPL.project_get_networks(context, project_id, associate)
###################
# Console and console pool operations: thin pass-throughs to the
# backend (IMPL).
def console_pool_create(context, values):
    """Create console pool."""
    return IMPL.console_pool_create(context, values)
def console_pool_get_by_host_type(context, compute_host, proxy_host,
                                  console_type):
    """Fetch a console pool for a given proxy host, compute host, and type."""
    return IMPL.console_pool_get_by_host_type(context,
                                              compute_host,
                                              proxy_host,
                                              console_type)
def console_pool_get_all_by_host_type(context, host, console_type):
    """Fetch all pools for given proxy host and type."""
    return IMPL.console_pool_get_all_by_host_type(context,
                                                  host,
                                                  console_type)
def console_create(context, values):
    """Create a console."""
    return IMPL.console_create(context, values)
def console_delete(context, console_id):
    """Delete a console."""
    return IMPL.console_delete(context, console_id)
def console_get_by_pool_instance(context, pool_id, instance_uuid):
    """Get console entry for a given instance and pool."""
    return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid)
def console_get_all_by_instance(context, instance_uuid):
    """Get consoles for a given instance."""
    return IMPL.console_get_all_by_instance(context, instance_uuid)
def console_get(context, console_id, instance_uuid=None):
    """Get a specific console (possibly on a given instance)."""
    return IMPL.console_get(context, console_id, instance_uuid)
##################
# Instance type (flavor) operations: thin pass-throughs to the
# backend (IMPL).
def instance_type_create(context, values):
    """Create a new instance type."""
    return IMPL.instance_type_create(context, values)
def instance_type_get_all(context, inactive=False, filters=None):
    """Get all instance types."""
    return IMPL.instance_type_get_all(
        context, inactive=inactive, filters=filters)
def instance_type_get(context, id):
    """Get instance type by id."""
    return IMPL.instance_type_get(context, id)
def instance_type_get_by_name(context, name):
    """Get instance type by name."""
    return IMPL.instance_type_get_by_name(context, name)
def instance_type_get_by_flavor_id(context, id):
    """Get instance type by flavor id."""
    return IMPL.instance_type_get_by_flavor_id(context, id)
def instance_type_destroy(context, name):
    """Delete an instance type."""
    return IMPL.instance_type_destroy(context, name)
def instance_type_access_get_by_flavor_id(context, flavor_id):
    """Get flavor access by flavor id."""
    return IMPL.instance_type_access_get_by_flavor_id(context, flavor_id)
def instance_type_access_add(context, flavor_id, project_id):
    """Add flavor access for project."""
    return IMPL.instance_type_access_add(context, flavor_id, project_id)
def instance_type_access_remove(context, flavor_id, project_id):
    """Remove flavor access for project."""
    return IMPL.instance_type_access_remove(context, flavor_id, project_id)
####################
# Child cell record operations: thin pass-throughs to the backend (IMPL).
def cell_create(context, values):
    """Create a new child Cell entry."""
    return IMPL.cell_create(context, values)
def cell_update(context, cell_name, values):
    """Update a child Cell entry."""
    return IMPL.cell_update(context, cell_name, values)
def cell_delete(context, cell_name):
    """Delete a child Cell."""
    return IMPL.cell_delete(context, cell_name)
def cell_get(context, cell_name):
    """Get a specific child Cell."""
    return IMPL.cell_get(context, cell_name)
def cell_get_all(context):
    """Get all child Cells."""
    return IMPL.cell_get_all(context)
####################
# Instance metadata and system metadata operations: thin pass-throughs
# to the backend (IMPL).
def instance_metadata_get_all(context, search_filts):
    """Get all metadata matching the given search filters."""
    return IMPL.instance_metadata_get_all(context, search_filts)
def instance_metadata_get(context, instance_uuid):
    """Get all metadata for an instance."""
    return IMPL.instance_metadata_get(context, instance_uuid)
def instance_metadata_delete(context, instance_uuid, key):
    """Delete the given metadata item."""
    IMPL.instance_metadata_delete(context, instance_uuid, key)
def instance_metadata_update(context, instance_uuid, metadata, delete):
    """Update metadata if it exists, otherwise create it."""
    return IMPL.instance_metadata_update(context, instance_uuid,
                                         metadata, delete)
####################
def instance_system_metadata_get(context, instance_uuid):
    """Get all system metadata for an instance."""
    return IMPL.instance_system_metadata_get(context, instance_uuid)
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
    """Update metadata if it exists, otherwise create it."""
    IMPL.instance_system_metadata_update(
        context, instance_uuid, metadata, delete)
####################
# Agent build operations: thin pass-throughs to the backend (IMPL).
def agent_build_create(context, values):
    """Create a new agent build entry."""
    return IMPL.agent_build_create(context, values)
def agent_build_get_by_triple(context, hypervisor, os, architecture):
    """Get agent build by hypervisor/OS/architecture triple."""
    return IMPL.agent_build_get_by_triple(context, hypervisor, os,
                                          architecture)
def agent_build_get_all(context, hypervisor=None):
    """Get all agent builds."""
    return IMPL.agent_build_get_all(context, hypervisor)
def agent_build_destroy(context, agent_update_id):
    """Destroy agent build entry."""
    IMPL.agent_build_destroy(context, agent_update_id)
def agent_build_update(context, agent_build_id, values):
    """Update agent build entry."""
    IMPL.agent_build_update(context, agent_build_id, values)
####################
def bw_usage_get(context, uuid, start_period, mac):
"""Return bw usage for instance and mac in a given audit period."""
return IMPL.bw_usage_get(context, uuid, start_period, mac)
def bw_usage_get_by_uuids(context, uuids, start_period):
    """Return bw usages for instance(s) in a given audit period.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
                    last_ctr_in, last_ctr_out, last_refreshed=None,
                    update_cells=True):
    """Update cached bandwidth usage for an instance's network based on mac
    address. Creates new record if needed.

    When ``update_cells`` is True the update is also pushed to the cells
    service on a best-effort basis: failures are logged, never raised.
    Returns the backend's result regardless of the cells outcome.
    """
    rv = IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
        bw_out, last_ctr_in, last_ctr_out, last_refreshed=last_refreshed)
    if update_cells:
        try:
            cells_rpcapi.CellsAPI().bw_usage_update_at_top(context,
                uuid, mac, start_period, bw_in, bw_out,
                last_ctr_in, last_ctr_out, last_refreshed)
        except Exception:
            # Best-effort notification: never let a cells failure break
            # the local DB update.
            LOG.exception(_("Failed to notify cells of bw_usage update"))
    return rv
####################
def instance_type_extra_specs_get(context, flavor_id):
    """Get all extra specs for an instance type.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.instance_type_extra_specs_get(context, flavor_id)
def instance_type_extra_specs_delete(context, flavor_id, key):
    """Delete the given extra specs item.

    Delegates to the configured database backend (``IMPL``); returns None.
    """
    IMPL.instance_type_extra_specs_delete(context, flavor_id, key)
def instance_type_extra_specs_update_or_create(context, flavor_id,
                                               extra_specs):
    """Create or update instance type extra specs.

    This adds or modifies the key/value pairs specified in the extra
    specs dict argument. Delegates to the configured database backend
    (``IMPL``); returns None.
    """
    IMPL.instance_type_extra_specs_update_or_create(context, flavor_id,
                                                    extra_specs)
###################
def vol_get_usage_by_time(context, begin):
    """Return volumes usage that have been updated after a specified time.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.vol_get_usage_by_time(context, begin)
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
                     instance_id, project_id, user_id,
                     last_refreshed=None, update_totals=False):
    """Update cached volume usage for a volume, creating a record if needed.

    Delegates to the configured database backend (``IMPL``) and returns
    the backend's result.
    """
    return IMPL.vol_usage_update(
        context, id, rd_req, rd_bytes, wr_req, wr_bytes,
        instance_id, project_id, user_id,
        last_refreshed=last_refreshed,
        update_totals=update_totals)
###################
def s3_image_get(context, image_id):
    """Find local s3 image represented by the provided id.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.s3_image_get(context, image_id)
def s3_image_get_by_uuid(context, image_uuid):
    """Find local s3 image represented by the provided uuid.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.s3_image_get_by_uuid(context, image_uuid)
def s3_image_create(context, image_uuid):
    """Create local s3 image represented by provided uuid.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.s3_image_create(context, image_uuid)
####################
def aggregate_create(context, values, metadata=None):
    """Create a new aggregate with metadata.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.aggregate_create(context, values, metadata)
def aggregate_get(context, aggregate_id):
    """Get a specific aggregate by id.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(context, host, key=None):
    """Get a list of aggregates that host belongs to.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.aggregate_get_by_host(context, host, key)
def aggregate_metadata_get_by_host(context, host, key=None):
    """Get metadata for all aggregates that host belongs to.

    Returns a dictionary where each value is a set; this covers the case
    where two aggregates have different values for the same key.
    Optional key filter. Delegates to the configured database backend
    (``IMPL``).
    """
    return IMPL.aggregate_metadata_get_by_host(context, host, key)
def aggregate_host_get_by_metadata_key(context, key):
    """Get hosts with a specific metadata key metadata for all aggregates.

    Returns a dictionary where each key is a hostname and each value is a set
    of the key values
    return value: {machine: set( az1, az2 )}

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.aggregate_host_get_by_metadata_key(context, key)
def aggregate_update(context, aggregate_id, values):
    """Update the attributes of an aggregate.

    If ``values`` contains a metadata key, it updates the aggregate
    metadata too. Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.aggregate_update(context, aggregate_id, values)
def aggregate_delete(context, aggregate_id):
    """Delete an aggregate.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.aggregate_delete(context, aggregate_id)
def aggregate_get_all(context):
    """Get all aggregates.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.aggregate_get_all(context)
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
    """Add/update metadata. If set_delete=True, it adds only.

    Delegates to the configured database backend (``IMPL``); returns None.
    NOTE(review): exact ``set_delete`` semantics are defined by IMPL --
    confirm against the backend implementation.
    """
    IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)
def aggregate_metadata_get(context, aggregate_id):
    """Get metadata for the specified aggregate.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.aggregate_metadata_get(context, aggregate_id)
def aggregate_metadata_delete(context, aggregate_id, key):
    """Delete the given metadata key.

    Delegates to the configured database backend (``IMPL``); returns None.
    """
    IMPL.aggregate_metadata_delete(context, aggregate_id, key)
def aggregate_host_add(context, aggregate_id, host):
    """Add host to the aggregate.

    Delegates to the configured database backend (``IMPL``); returns None.
    """
    IMPL.aggregate_host_add(context, aggregate_id, host)
def aggregate_host_get_all(context, aggregate_id):
    """Get hosts for the specified aggregate.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.aggregate_host_get_all(context, aggregate_id)
def aggregate_host_delete(context, aggregate_id, host):
    """Delete the given host from the aggregate.

    Delegates to the configured database backend (``IMPL``); returns None.
    """
    IMPL.aggregate_host_delete(context, aggregate_id, host)
####################
def instance_fault_create(context, values, update_cells=True):
    """Create a new Instance Fault.

    When ``update_cells`` is True, the newly created fault is also pushed
    to the cells service on a best-effort basis: failures are logged,
    never raised. Returns the backend's created fault.
    """
    rv = IMPL.instance_fault_create(context, values)
    if update_cells:
        try:
            cells_rpcapi.CellsAPI().instance_fault_create_at_top(context, rv)
        except Exception:
            # Best-effort: a cells failure must not break fault creation.
            LOG.exception(_("Failed to notify cells of instance fault"))
    return rv
def instance_fault_get_by_instance_uuids(context, instance_uuids):
    """Get all instance faults for the provided instance_uuids.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
####################
def action_start(context, values):
    """Start an action for an instance.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.action_start(context, values)
def action_finish(context, values):
    """Finish an action for an instance.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.action_finish(context, values)
def actions_get(context, uuid):
    """Get all instance actions for the provided instance.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.actions_get(context, uuid)
def action_get_by_request_id(context, uuid, request_id):
    """Get the action by request_id and given instance.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.action_get_by_request_id(context, uuid, request_id)
def action_event_start(context, values):
    """Start an event on an instance action.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.action_event_start(context, values)
def action_event_finish(context, values):
    """Finish an event on an instance action.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.action_event_finish(context, values)
def action_events_get(context, action_id):
    """Get the events by action id.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.action_events_get(context, action_id)
def action_event_get_by_id(context, action_id, event_id):
    """Get a single action event by its id and parent action id.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.action_event_get_by_id(context, action_id, event_id)
####################
def get_ec2_instance_id_by_uuid(context, instance_id):
    """Get ec2 id through uuid from instance_id_mappings table.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.get_ec2_instance_id_by_uuid(context, instance_id)
def get_instance_uuid_by_ec2_id(context, ec2_id):
    """Get uuid through ec2 id from instance_id_mappings table.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id)
def ec2_instance_create(context, instance_uuid, id=None):
    """Create the ec2 id to instance uuid mapping on demand.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.ec2_instance_create(context, instance_uuid, id)
####################
def task_log_end_task(context, task_name,
                      period_beginning,
                      period_ending,
                      host,
                      errors,
                      message=None):
    """Mark a task as complete for a given host/time period.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.task_log_end_task(context, task_name,
                                  period_beginning,
                                  period_ending,
                                  host,
                                  errors,
                                  message)
def task_log_begin_task(context, task_name,
                        period_beginning,
                        period_ending,
                        host,
                        task_items=None,
                        message=None):
    """Mark a task as started for a given host/time period.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.task_log_begin_task(context, task_name,
                                    period_beginning,
                                    period_ending,
                                    host,
                                    task_items,
                                    message)
def task_log_get_all(context, task_name, period_beginning,
                     period_ending, host=None, state=None):
    """Get all task log records matching the given filters.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.task_log_get_all(context, task_name, period_beginning,
                                 period_ending, host, state)
def task_log_get(context, task_name, period_beginning,
                 period_ending, host, state=None):
    """Get a task log record for the given task/host/time period.

    Delegates to the configured database backend (``IMPL``).
    """
    return IMPL.task_log_get(context, task_name, period_beginning,
                             period_ending, host, state)
####################
def archive_deleted_rows(context, max_rows=None):
    """Move up to max_rows rows from production tables to corresponding shadow
    tables.

    Delegates to the configured database backend (``IMPL``).

    :returns: number of rows archived.
    """
    return IMPL.archive_deleted_rows(context, max_rows=max_rows)
def archive_deleted_rows_for_table(context, tablename, max_rows=None):
    """Move up to max_rows rows from tablename to corresponding shadow
    table.

    Delegates to the configured database backend (``IMPL``).

    :returns: number of rows archived.
    """
    return IMPL.archive_deleted_rows_for_table(context, tablename,
                                               max_rows=max_rows)
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model that process images by extracting only "important" patches."""
import enum
import functools
from typing import Any, Dict, Optional, Tuple
import chex
import einops
import flax.deprecated.nn as nn
import jax
import jax.numpy as jnp
from lib import utils
from lib.layers import transformer
from lib.ops import perturbed_topk
from lib.ops import topk
class SelectionMethod(str, enum.Enum):
  """Patch-selection strategies; str-valued for easy config parsing."""
  SINKHORN_TOPK = "topk"
  PERTURBED_TOPK = "perturbed-topk"
  HARD_TOPK = "hard-topk"
  RANDOM = "random"
class AggregationMethod(str, enum.Enum):
  """Methods to aggregate the k patch representations into one vector."""
  TRANSFORMER = "transformer"
  MEANPOOLING = "meanpooling"
  MAXPOOLING = "maxpooling"
  SUM_LAYERNORM = "sum-layernorm"
class SqueezeExciteLayer(nn.Module):
  """Squeeze and Excite layer from https://arxiv.org/abs/1709.01507."""
  def apply(self, x, reduction = 16):
    # x: (batch, height, width, channels) -- spatial mean is taken over
    # axes (1, 2), so channels-last layout is assumed.
    num_channels = x.shape[-1]
    # Squeeze: global average pool over the spatial dimensions.
    y = x.mean(axis=(1, 2))
    # Excite: bottleneck MLP producing per-channel gates in (0, 1).
    y = nn.Dense(y, features=num_channels // reduction, bias=False)
    y = nn.relu(y)
    y = nn.Dense(y, features=num_channels, bias=False)
    y = nn.sigmoid(y)
    # Rescale each channel of the input by its gate.
    return x * y[:, None, None, :]
class Scorer(nn.Module):
  """Small CNN that produces one scalar score per 8x8 spatial cell."""
  def apply(self, x, use_squeeze_excite = False):
    # Four VALID 3x3 convs: each trims 2 pixels per spatial dim (8 total).
    x = nn.Conv(x, features=8, kernel_size=(3, 3), padding="VALID")
    x = nn.relu(x)
    x = nn.Conv(x, features=16, kernel_size=(3, 3), padding="VALID")
    x = nn.relu(x)
    if use_squeeze_excite:
      x = SqueezeExciteLayer(x)
    x = nn.Conv(x, features=32, kernel_size=(3, 3), padding="VALID")
    x = nn.relu(x)
    if use_squeeze_excite:
      x = SqueezeExciteLayer(x)
    x = nn.Conv(x, features=1, kernel_size=(3, 3), padding="VALID")
    # Non-overlapping 8x8 max pool; drop the trailing singleton channel.
    scores = nn.max_pool(x, window_shape=(8, 8), strides=(8, 8))[Ellipsis, 0]
    return scores
  @classmethod
  def compute_output_size(cls, height, width):
    # Mirrors apply(): convs remove 8 pixels, pooling divides by 8.
    return ((height - 8) // 8, (width - 8) // 8)
class PatchNet(nn.Module):
  """Model that process images by extracting only "important" patches."""
  def apply(self,
            x,
            *,
            patch_size,
            k,
            downscale,
            scorer_has_se,
            normalization_str = "identity",
            selection_method,
            selection_method_kwargs = None,
            selection_method_inference = None,
            patch_dropout = 0.,
            hard_topk_probability = 0.,
            random_patch_probability = 0.,
            use_iterative_extraction,
            append_position_to_input,
            feature_network,
            aggregation_method,
            aggregation_method_kwargs = None,
            train):
    """Process a high resolution image by selecting a subset of useful patches.
    This model processes the input as follow:
    1. Compute scores per patch on a downscaled version of the input.
    2. Select "important" patches using sampling or top-k methods.
    3. Extract the patches from the high-resolution image.
    4. Compute representation vector for each patch with a feature network.
    5. Aggregate the patch representation to obtain an image representation.
    Args:
      x: Input tensor of shape (batch, height, witdh, channels).
      patch_size: Size of the (squared) patches to extract.
      k: Number of patches to extract per image.
      downscale: Downscale multiplier for the input of the scorer network.
      scorer_has_se: Whether scorer network has Squeeze-excite layers.
      normalization_str: String specifying the normalization of the scores.
      selection_method: Method that selects which patches should be extracted,
        based on their scores. Either returns indices (hard selection) or
        indicators vectors (which could yield interpolated patches).
      selection_method_kwargs: Keyword args for the selection_method.
      selection_method_inference: Selection method used at inference.
      patch_dropout: Probability to replace a patch by 0 values.
      hard_topk_probability: Probability to use the true topk on the scores to
        select the patches. This operation has no gradient so scorer's weights
        won't be trained.
      random_patch_probability: Probability to replace each patch by a random
        patch in the image during training.
      use_iterative_extraction: If True, uses a for loop instead of patch
        indexing for memory efficiency.
      append_position_to_input: Append normalized (height, width) position to
        the channels of the input.
      feature_network: Network to be applied on each patch individually to
        obtain patch representation vectors.
      aggregation_method: Method to aggregate the representations of the k
        patches of each image to obtain the image representation.
      aggregation_method_kwargs: Keywords arguments for aggregation_method.
      train: If the model is being trained. Disable dropout otherwise.
    Returns:
      A representation vector for each image in the batch.
    """
    selection_method = SelectionMethod(selection_method)
    aggregation_method = AggregationMethod(aggregation_method)
    if selection_method_inference:
      selection_method_inference = SelectionMethod(selection_method_inference)
    selection_method_kwargs = selection_method_kwargs or {}
    aggregation_method_kwargs = aggregation_method_kwargs or {}
    stats = {}
    # Compute new dimension of the scoring image.
    b, h, w, c = x.shape
    scoring_shape = (b, h // downscale, w // downscale, c)
    # === Compute the scores with a small CNN.
    if selection_method == SelectionMethod.RANDOM:
      # Random selection never runs the scorer; only the grid shape is needed.
      scores_h, scores_w = Scorer.compute_output_size(h // downscale,
                                                      w // downscale)
      num_patches = scores_h * scores_w
    else:
      # Downscale input to run scorer on.
      scoring_x = jax.image.resize(x, scoring_shape, method="bilinear")
      scores = Scorer(scoring_x, use_squeeze_excite=scorer_has_se,
                      name="scorer")
      flatten_scores = einops.rearrange(scores, "b h w -> b (h w)")
      num_patches = flatten_scores.shape[-1]
      scores_h, scores_w = scores.shape[1:3]
      # Compute entropy before normalization
      prob_scores = jax.nn.softmax(flatten_scores)
      stats["entropy_before_normalization"] = jax.scipy.special.entr(
          prob_scores).sum(axis=1).mean(axis=0)
      # Normalize the flatten scores
      normalization_fn = create_normalization_fn(normalization_str)
      flatten_scores = normalization_fn(flatten_scores)
      scores = flatten_scores.reshape(scores.shape)
      stats["scores"] = scores[Ellipsis, None]
    # Concatenate height and width position to the input channels.
    if append_position_to_input:
      coords = utils.create_grid([h, w], value_range=(0., 1.))
      x = jnp.concatenate([x, coords[jnp.newaxis, Ellipsis].repeat(b, axis=0)],
                          axis=-1)
      c += 2
    # Overwrite the selection method at inference
    if selection_method_inference and not train:
      selection_method = selection_method_inference
    # === Patch selection
    # Select the patches by sampling or top-k. Some methods returns the indices
    # of the selected patches, other methods return indicator vectors.
    extract_by_indices = selection_method in [SelectionMethod.HARD_TOPK,
                                              SelectionMethod.RANDOM]
    if selection_method is SelectionMethod.SINKHORN_TOPK:
      indicators = select_patches_sinkhorn_topk(
          flatten_scores, k=k, **selection_method_kwargs)
    elif selection_method is SelectionMethod.PERTURBED_TOPK:
      sigma = selection_method_kwargs["sigma"]
      num_samples = selection_method_kwargs["num_samples"]
      # NOTE: "sigma_mutiplier" (sic) is the persisted state name; renaming
      # it would break existing checkpoints.
      sigma *= self.state("sigma_mutiplier", shape=(),
                          initializer=nn.initializers.ones).value
      stats["sigma"] = sigma
      indicators = select_patches_perturbed_topk(
          flatten_scores, k=k, sigma=sigma, num_samples=num_samples)
    elif selection_method is SelectionMethod.HARD_TOPK:
      indices = select_patches_hard_topk(flatten_scores, k=k)
    elif selection_method is SelectionMethod.RANDOM:
      batch_random_indices_fn = jax.vmap(functools.partial(
          jax.random.choice, a=num_patches, shape=(k,), replace=False))
      indices = batch_random_indices_fn(jax.random.split(nn.make_rng(), b))
    # Compute scores entropy for regularization
    if selection_method not in [SelectionMethod.RANDOM]:
      prob_scores = flatten_scores
      # Normalize the scores if it is not already done.
      if "softmax" not in normalization_str:
        prob_scores = jax.nn.softmax(prob_scores)
      stats["entropy"] = jax.scipy.special.entr(
          prob_scores).sum(axis=1).mean(axis=0)
    # Randomly use hard topk at training.
    if (train and
        hard_topk_probability > 0 and
        selection_method not in [SelectionMethod.HARD_TOPK,
                                 SelectionMethod.RANDOM]):
      true_indices = select_patches_hard_topk(flatten_scores, k=k)
      random_values = jax.random.uniform(nn.make_rng(), (b,))
      use_hard = random_values < hard_topk_probability
      if extract_by_indices:
        indices = jnp.where(use_hard[:, None], true_indices, indices)
      else:
        true_indicators = make_indicators(true_indices, num_patches)
        indicators = jnp.where(use_hard[:, None, None],
                               true_indicators, indicators)
    # Sample some random patches during training with random_patch_probability.
    if (train and
        random_patch_probability > 0 and
        selection_method is not SelectionMethod.RANDOM):
      single_random_patches = functools.partial(
          jax.random.choice, a=num_patches, shape=(k,), replace=False)
      random_indices = jax.vmap(single_random_patches)(
          jax.random.split(nn.make_rng(), b))
      random_values = jax.random.uniform(nn.make_rng(), (b, k))
      use_random = random_values < random_patch_probability
      if extract_by_indices:
        indices = jnp.where(use_random, random_indices, indices)
      else:
        random_indicators = make_indicators(random_indices,
                                            num_patches)
        indicators = jnp.where(use_random[:, None, :],
                               random_indicators, indicators)
    # === Patch extraction
    if extract_by_indices:
      patches = extract_patches_from_indices(
          x, indices, patch_size=patch_size,
          grid_shape=(scores_h, scores_w))
      # Indicators are still produced so the transformer positional
      # encoding below has a uniform input.
      indicators = make_indicators(indices, num_patches)
    else:
      patches = extract_patches_from_indicators(
          x, indicators, patch_size, grid_shape=(scores_h, scores_w),
          iterative=use_iterative_extraction, patch_dropout=patch_dropout,
          train=train)
    chex.assert_shape(patches, (b, k, patch_size, patch_size, c))
    stats["extracted_patches"] = einops.rearrange(patches,
                                                  "b k i j c -> b i (k j) c")
    # Remove position channels for plotting.
    if append_position_to_input:
      stats["extracted_patches"] = (stats["extracted_patches"][Ellipsis, :-2])
    # === Compute patch features
    flatten_patches = einops.rearrange(patches, "b k i j c -> (b k) i j c")
    representations = feature_network(flatten_patches, train=train)
    if representations.ndim > 2:
      # Spatial feature maps are mean-pooled down to one vector per patch.
      collapse_axis = tuple(range(1, representations.ndim - 1))
      representations = representations.mean(axis=collapse_axis)
    representations = einops.rearrange(representations,
                                       "(b k) d -> b k d", k=k)
    stats["patch_representations"] = representations
    # === Aggregate the k patches
    # - for sampling we are forced to take an expectation
    # - for topk we have multiple options: mean, max, transformer.
    if aggregation_method is AggregationMethod.TRANSFORMER:
      patch_pos_encoding = nn.Dense(einops.rearrange(indicators,
                                                     "b d k -> b k d"),
                                    features=representations.shape[-1])
      chex.assert_equal_shape([representations, patch_pos_encoding])
      representations += patch_pos_encoding
      representations = transformer.Transformer(
          representations, **aggregation_method_kwargs, is_training=train)
    elif aggregation_method is AggregationMethod.MEANPOOLING:
      representations = representations.mean(axis=1)
    elif aggregation_method is AggregationMethod.MAXPOOLING:
      representations = representations.max(axis=1)
    elif aggregation_method is AggregationMethod.SUM_LAYERNORM:
      representations = representations.sum(axis=1)
      representations = nn.LayerNorm(representations)
    representations = nn.Dense(representations,
                               features=representations.shape[-1],
                               name="classification_dense1")
    representations = nn.swish(representations)
    return representations, stats
def select_patches_perturbed_topk(flatten_scores,
                                  sigma,
                                  *,
                                  k,
                                  num_samples = 1000):
  """Select patches using a differentiable top-k based on perturbation.
  Uses https://q-berthet.github.io/papers/BerBloTeb20.pdf,
  see off_the_grid.lib.ops.perturbed_topk for more info.
  Args:
    flatten_scores: The flatten scores of shape (batch, num_patches).
    sigma: Standard deviation of the noise.
    k: The number of patches to extract.
    num_samples: Number of noisy inputs used to compute the output expectation.
  Returns:
    Indicator vectors of the selected patches (batch, num_patches, k).
  """
  batch_size = flatten_scores.shape[0]
  # One independent RNG key per batch element; requires a flax RNG scope.
  batch_topk_fn = jax.vmap(
      functools.partial(perturbed_topk.perturbed_sorted_topk_indicators,
                        num_samples=num_samples,
                        sigma=sigma,
                        k=k))
  rng_keys = jax.random.split(nn.make_rng(), batch_size)
  indicators = batch_topk_fn(flatten_scores, rng_keys)
  # Transpose (b, k, num_patches) -> (b, num_patches, k) to match the
  # indicator layout used by the extraction functions.
  topk_indicators_flatten = einops.rearrange(indicators, "b k d -> b d k")
  return topk_indicators_flatten
def select_patches_sinkhorn_topk(flatten_scores,
                                 *,
                                 k,
                                 epsilon,
                                 num_iterations):
  """Select patches using a differentiable top-k based on sinkhorn.
  Uses https://arxiv.org/abs/2002.06504, see lib.ops.topk for more
  info.
  Args:
    flatten_scores: The flatten scores of shape (batch, num_patches).
    k: The number of patches to extract.
    epsilon: Temperature of sinkhorn.
    num_iterations: Number of iterations of sinkhorn.
  Returns:
    Indicator vectors of the selected patches (batch, num_patches, k).
  """
  # vmap over the batch dimension; each sample is solved independently.
  batch_topk_fn = jax.vmap(
      functools.partial(topk.differentiable_smooth_sorted_top_k,
                        k=k, epsilon=epsilon, num_iterations=num_iterations))
  topk_indicators_flatten = batch_topk_fn(flatten_scores)
  return topk_indicators_flatten
def select_patches_hard_topk(flatten_scores, *, k):
  """Return the indices of the k patches with highest `flatten_scores`."""
  # jax.lax.top_k returns (values, indices); only the indices are kept.
  # (A naive jnp.argsort-based variant was ~10% slower on TPU.)
  _, top_indices = jax.lax.top_k(flatten_scores, k)
  return top_indices
def extract_patches_from_indicators(x,
                                    indicators,
                                    patch_size,
                                    patch_dropout,
                                    grid_shape,
                                    train,
                                    iterative = False):
  """Extract patches from a batch of images.
  Args:
    x: The batch of images of shape (batch, height, width, channels).
    indicators: The one hot indicators of shape (batch, num_patches, k).
    patch_size: The size of the (squared) patches to extract.
    patch_dropout: Probability to replace a patch by 0 values.
    grid_shape: Pair of height, width of the disposition of the num_patches
      patches.
    train: If the model is being trained. Disable dropout if not.
    iterative: If True, extracts the patches with a for loop rather than
      instantiating the "all patches" tensor and extracting by dotproduct with
      indicators. `iterative` is more memory efficient.
  Returns:
    The patches extracted from x with shape
    (batch, k, patch_size, patch_size, channels).
  """
  batch_size, height, width, channels = x.shape
  scores_h, scores_w = grid_shape
  k = indicators.shape[-1]
  indicators = einops.rearrange(indicators, "b (h w) k -> b k h w",
                                h=scores_h, w=scores_w)
  # Each score-grid cell maps to a (scale_height, scale_width) region.
  scale_height = height // scores_h
  scale_width = width // scores_w
  padded_height = scale_height * scores_h + patch_size - 1
  padded_width = scale_width * scores_w + patch_size - 1
  # Pad so patches centered on a cell stay in bounds.
  top_pad = (patch_size - scale_height) // 2
  left_pad = (patch_size - scale_width) // 2
  bottom_pad = padded_height - top_pad - height
  right_pad = padded_width - left_pad - width
  # TODO(jbcdnr): assert padding is positive.
  padded_x = jnp.pad(x,
                     [(0, 0),
                      (top_pad, bottom_pad),
                      (left_pad, right_pad),
                      (0, 0)])
  # Extract the patches. Iterative fits better in memory as it does not
  # instanciate the "all patches" tensor but iterate over them to compute the
  # weighted sum with the indicator variables from topk.
  if not iterative:
    assert patch_dropout == 0., "Patch dropout not implemented."
    patches = utils.extract_images_patches(
        padded_x,
        window_size=(patch_size, patch_size),
        stride=(scale_height, scale_width))
    shape = (batch_size, scores_h, scores_w, patch_size, patch_size, channels)
    chex.assert_shape(patches, shape)
    # Weighted sum over all grid cells; soft indicators interpolate patches.
    patches = jnp.einsum("b k h w, b h w i j c -> b k i j c",
                         indicators, patches)
  else:
    # Dropout mask on grid cells: a dropped cell contributes zeros.
    mask = jnp.ones((batch_size, scores_h, scores_w))
    mask = nn.dropout(mask, patch_dropout, deterministic=not train)
    def accumulate_patches(acc, index_i_j):
      # Adds cell (i, j)'s weighted patch into the running sum `acc`.
      i, j = index_i_j
      patch = jax.lax.dynamic_slice(
          padded_x,
          (0, i * scale_height, j * scale_width, 0),
          (batch_size, patch_size, patch_size, channels))
      weights = indicators[:, :, i, j]
      is_masked = mask[:, i, j]
      weighted_patch = jnp.einsum("b, bk, bijc -> bkijc",
                                  is_masked, weights, patch)
      chex.assert_equal_shape([acc, weighted_patch])
      acc += weighted_patch
      return acc, None
    indices = jnp.stack(
        jnp.meshgrid(jnp.arange(scores_h), jnp.arange(scores_w), indexing="ij"),
        axis=-1)
    indices = indices.reshape((-1, 2))
    init_patches = jnp.zeros((batch_size, k, patch_size, patch_size, channels))
    patches, _ = jax.lax.scan(accumulate_patches, init_patches, indices)
  return patches
def extract_patches_from_indices(x,
                                 indices,
                                 patch_size,
                                 grid_shape):
  """Extract patches from a batch of images.
  Args:
    x: The batch of images of shape (batch, height, width, channels).
    indices: The indices of the flatten patches to extract of shape
      (batch, k).
    patch_size: The size of the (squared) patches to extract.
    grid_shape: Pair of height, width of the disposition of the num_patches
      patches.
  Returns:
    The patches extracted from x with shape
    (batch, k, patch_size, patch_size, channels).
  """
  _, img_h, img_w, _ = x.shape
  grid_h, grid_w = grid_shape
  # Each grid cell covers a (cell_h, cell_w) region of the image.
  cell_h = img_h // grid_h
  cell_w = img_w // grid_w
  # Symmetric padding so a patch centered on a border cell stays in bounds.
  pad_h = (patch_size - cell_h) // 2
  pad_w = (patch_size - cell_w) // 2
  # Convert flat grid indices to top-left pixel coordinates.
  row_starts = (indices // grid_w) * cell_h
  col_starts = (indices % grid_w) * cell_w
  padded = jnp.pad(x,
                   [(0, 0),
                    (pad_h, pad_h),
                    (pad_w, pad_w),
                    (0, 0)])
  def slice_one(image, i, j):
    # Equivalent to image[i:i + patch_size, j:j + patch_size, :].
    return jax.lax.dynamic_slice(
        image, (i, j, 0), (patch_size, patch_size, image.shape[-1]))
  # Inner vmap: the k patches of one image; outer vmap: the batch.
  per_image = jax.vmap(slice_one, in_axes=(None, 0, 0))
  per_batch = jax.vmap(per_image)
  return per_batch(padded, row_starts, col_starts)
def _get_available_normalization_fns():
  """Defines functions available in normalization function strings."""
  def smoothing(s):
    # Blend the distribution with a uniform one, weighted by `s`.
    def smoothing_fn(x):
      uniform = 1. / x.shape[-1]
      return x * (1 - s) + uniform * s
    return smoothing_fn
  def zeroone(scores):
    # Min-max rescale each row of `scores` into [0, 1].
    shifted = scores - scores.min(axis=1, keepdims=True)
    return shifted / shifted.max(axis=1, keepdims=True)
  def zerooneeps(eps):
    # Min-max rescale with an epsilon guard against a zero range.
    def zerooneeps_fn(scores):
      lo = scores.min(axis=-1, keepdims=True)
      hi = scores.max(axis=-1, keepdims=True)
      return (scores - lo) / (hi - lo + eps)
    return zerooneeps_fn
  return {
      "identity": lambda x: x,
      "softmax": jax.nn.softmax,
      "smoothing": smoothing,
      "zeroone": zeroone,
      "sigmoid": jax.nn.sigmoid,
      "layernorm": nn.LayerNorm,
      "zerooneeps": zerooneeps,
  }
def create_normalization_fn(fn_str):
  """Create a normalization function from a string representation.
  The syntax is similar to data preprocessing strings. Functions are specified
  by name with parameters and chained with | character. Available functions are
  specified in _get_available_normalization_fns.
  Example:
    "softmax|smoothing(0.1)" will give `smoothing(softamax(x), 0.1)`.
  Args:
    fn_str: The function definition string.
  Returns:
    The function specified by the string.
  """
  # NOTE: eval only ever sees trusted configuration strings, evaluated in
  # the restricted namespace of _get_available_normalization_fns().
  namespace = _get_available_normalization_fns()
  stages = [eval(spec, namespace)  # pylint:disable=eval-used
            for spec in fn_str.split("|") if spec]
  def chained(x):
    # Apply the parsed stages left to right.
    for stage in stages:
      x = stage(x)
    return x
  return chained
def batch_gather(x, indices):
  """Per-sample gather: out[b] = x[b][indices[b], ...]."""
  gather_one = lambda sample, idx: sample[idx, Ellipsis]
  return jax.vmap(gather_one)(x, indices)
def make_indicators(indices, num_classes):
  """Create one hot associated to indices.
  Args:
    indices: Tensor of indices of dimension (batch, k).
    num_classes: The number of classes to represent in the one hot vectors.
  Returns:
    The one hot indicators associated to indices of shape
    (batch, num_classes, k).
  """
  # one_hot yields (batch, k, num_classes); swap the last two axes to put
  # the class dimension before the patch dimension.
  return jnp.swapaxes(jax.nn.one_hot(indices, num_classes), 1, 2)
| |
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as n_consts
import testtools
import textwrap
from neutron.agent.linux import keepalived
from neutron.tests import base
# Keepalived user guide:
# http://www.keepalived.org/pdf/UserGuide.pdf
KEEPALIVED_GLOBAL_CONFIG = textwrap.dedent("""\
global_defs {
notification_email_from %(email_from)s
router_id %(router_id)s
}""") % dict(
email_from=keepalived.KEEPALIVED_EMAIL_FROM,
router_id=keepalived.KEEPALIVED_ROUTER_ID)
class KeepalivedGetFreeRangeTestCase(base.BaseTestCase):
    """Tests for keepalived.get_free_range()."""
    def test_get_free_range(self):
        # First /24 not covered by the excluded ranges is 169.254.3.0/24.
        free_range = keepalived.get_free_range(
            parent_range='169.254.0.0/16',
            excluded_ranges=['169.254.0.0/24',
                             '169.254.1.0/24',
                             '169.254.2.0/24'],
            size=24)
        self.assertEqual('169.254.3.0/24', free_range)
    def test_get_free_range_without_excluded(self):
        # With nothing excluded the first subnet of the parent is returned.
        free_range = keepalived.get_free_range(
            parent_range='169.254.0.0/16',
            excluded_ranges=[],
            size=20)
        self.assertEqual('169.254.0.0/20', free_range)
    def test_get_free_range_excluded_out_of_parent(self):
        # Exclusions outside the parent range must be ignored.
        free_range = keepalived.get_free_range(
            parent_range='169.254.0.0/16',
            excluded_ranges=['255.255.255.0/24'],
            size=24)
        self.assertEqual('169.254.0.0/24', free_range)
    def test_get_free_range_not_found(self):
        # Requesting a /8 inside a /24 cannot succeed.
        tiny_parent_range = '192.168.1.0/24'
        huge_size = 8
        with testtools.ExpectedException(ValueError):
            keepalived.get_free_range(
                parent_range=tiny_parent_range,
                excluded_ranges=[],
                size=huge_size)
class KeepalivedConfBaseMixin(object):
    """Builds a two-instance KeepalivedConf fixture shared by test cases."""
    def _get_config(self):
        config = keepalived.KeepalivedConf()
        # Instance 1: authenticated, tracks eth0, custom advert interval.
        instance1 = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
                                                  ['169.254.192.0/18'],
                                                  advert_int=5)
        instance1.set_authentication('AH', 'pass123')
        instance1.track_interfaces.append("eth0")
        vip_address1 = keepalived.KeepalivedVipAddress('192.168.1.0/24',
                                                       'eth1')
        vip_address2 = keepalived.KeepalivedVipAddress('192.168.2.0/24',
                                                       'eth2')
        vip_address3 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
                                                       'eth2')
        vip_address_ex = keepalived.KeepalivedVipAddress('192.168.55.0/24',
                                                        'eth10')
        instance1.vips.append(vip_address1)
        instance1.vips.append(vip_address2)
        instance1.vips.append(vip_address3)
        instance1.vips.append(vip_address_ex)
        virtual_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv4_ANY,
                                                          "192.168.1.1",
                                                          "eth1")
        instance1.virtual_routes.gateway_routes = [virtual_route]
        # Instance 2: no auth, multicast source IP, shares two VIPs with
        # instance 1.
        instance2 = keepalived.KeepalivedInstance('MASTER', 'eth4', 2,
                                                  ['169.254.192.0/18'],
                                                  mcast_src_ip='224.0.0.1')
        instance2.track_interfaces.append("eth4")
        vip_address1 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
                                                       'eth6')
        instance2.vips.append(vip_address1)
        instance2.vips.append(vip_address2)
        instance2.vips.append(vip_address_ex)
        config.add_instance(instance1)
        config.add_instance(instance2)
        return config
class KeepalivedConfTestCase(base.BaseTestCase,
                             KeepalivedConfBaseMixin):
    """Verifies the exact keepalived.conf text rendered from the fixture."""
    # Expected rendering of the mixin's two-instance config; compared
    # byte-for-byte, so the literal below must not be reformatted.
    expected = KEEPALIVED_GLOBAL_CONFIG + textwrap.dedent("""
        vrrp_instance VR_1 {
            state MASTER
            interface eth0
            virtual_router_id 1
            priority 50
            garp_master_delay 60
            advert_int 5
            authentication {
                auth_type AH
                auth_pass pass123
            }
            track_interface {
                eth0
            }
            virtual_ipaddress {
                169.254.0.1/24 dev eth0
            }
            virtual_ipaddress_excluded {
                192.168.1.0/24 dev eth1
                192.168.2.0/24 dev eth2
                192.168.3.0/24 dev eth2
                192.168.55.0/24 dev eth10
            }
            virtual_routes {
                0.0.0.0/0 via 192.168.1.1 dev eth1
            }
        }
        vrrp_instance VR_2 {
            state MASTER
            interface eth4
            virtual_router_id 2
            priority 50
            garp_master_delay 60
            mcast_src_ip 224.0.0.1
            track_interface {
                eth4
            }
            virtual_ipaddress {
                169.254.0.2/24 dev eth4
            }
            virtual_ipaddress_excluded {
                192.168.2.0/24 dev eth2
                192.168.3.0/24 dev eth6
                192.168.55.0/24 dev eth10
            }
        }""")
    def test_config_generation(self):
        config = self._get_config()
        self.assertEqual(self.expected, config.get_config_str())
    def test_config_with_reset(self):
        # After reset() only the global_defs section should remain.
        config = self._get_config()
        self.assertEqual(self.expected, config.get_config_str())
        config.reset()
        self.assertEqual(KEEPALIVED_GLOBAL_CONFIG, config.get_config_str())
    def test_get_existing_vip_ip_addresses_returns_list(self):
        config = self._get_config()
        instance = config.get_instance(1)
        current_vips = sorted(instance.get_existing_vip_ip_addresses('eth2'))
        self.assertEqual(['192.168.2.0/24', '192.168.3.0/24'], current_vips)
class KeepalivedStateExceptionTestCase(base.BaseTestCase):

    def test_state_exception(self):
        """Invalid VRRP state / auth type raise their dedicated exceptions."""
        bogus_state = 'a seal walks'
        self.assertRaises(keepalived.InvalidInstanceStateException,
                          keepalived.KeepalivedInstance,
                          bogus_state, 'eth0', 33,
                          ['169.254.192.0/18'])

        bogus_auth_type = 'into a club'
        instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
                                                 ['169.254.192.0/18'])
        self.assertRaises(keepalived.InvalidAuthenticationTypeException,
                          instance.set_authentication,
                          bogus_auth_type, 'some_password')
class KeepalivedInstanceRoutesTestCase(base.BaseTestCase):

    @classmethod
    def _get_instance_routes(cls):
        """Fixture: two gateway routes, two extra routes, one extra subnet."""
        routes = keepalived.KeepalivedInstanceRoutes()
        routes.gateway_routes = [
            keepalived.KeepalivedVirtualRoute(
                '0.0.0.0/0', '1.0.0.254', 'eth0'),
            keepalived.KeepalivedVirtualRoute(
                '::/0', 'fe80::3e97:eff:fe26:3bfa/64', 'eth1'),
        ]
        routes.extra_routes = [
            keepalived.KeepalivedVirtualRoute('10.0.0.0/8', '1.0.0.1'),
            keepalived.KeepalivedVirtualRoute('20.0.0.0/8', '2.0.0.2'),
        ]
        routes.extra_subnets = [
            keepalived.KeepalivedVirtualRoute(
                '30.0.0.0/8', None, 'eth0', scope='link'),
        ]
        return routes

    def test_routes(self):
        """The routes property aggregates all three route groups."""
        self.assertEqual(5, len(self._get_instance_routes().routes))

    def test_remove_routes_on_interface(self):
        """Removing by device drops gateway and subnet routes alike."""
        routes = self._get_instance_routes()
        routes.remove_routes_on_interface('eth0')
        self.assertEqual(3, len(routes.routes))
        routes.remove_routes_on_interface('eth1')
        self.assertEqual(2, len(routes.routes))

    def test_build_config(self):
        """Every route renders in its expected textual form."""
        expected = """    virtual_routes {
        0.0.0.0/0 via 1.0.0.254 dev eth0
        ::/0 via fe80::3e97:eff:fe26:3bfa/64 dev eth1
        10.0.0.0/8 via 1.0.0.1
        20.0.0.0/8 via 2.0.0.2
        30.0.0.0/8 dev eth0 scope link
    }"""
        routes = self._get_instance_routes()
        self.assertEqual(expected, '\n'.join(routes.build_config()))
class KeepalivedInstanceTestCase(base.BaseTestCase,
                                 KeepalivedConfBaseMixin):

    def test_get_primary_vip(self):
        """The primary VIP embeds the virtual router id in its address."""
        instance = keepalived.KeepalivedInstance('MASTER', 'ha0', 42,
                                                 ['169.254.192.0/18'])
        self.assertEqual('169.254.0.42/24', instance.get_primary_vip())

    def test_remove_addresses_by_interface(self):
        """Removing eth2/eth10 strips their VIPs from instance 1 only;
        instance 2 keeps its full configuration."""
        config = self._get_config()
        instance = config.get_instance(1)
        instance.remove_vips_vroutes_by_interface('eth2')
        instance.remove_vips_vroutes_by_interface('eth10')

        expected = KEEPALIVED_GLOBAL_CONFIG + textwrap.dedent("""
            vrrp_instance VR_1 {
                state MASTER
                interface eth0
                virtual_router_id 1
                priority 50
                garp_master_delay 60
                advert_int 5
                authentication {
                    auth_type AH
                    auth_pass pass123
                }
                track_interface {
                    eth0
                }
                virtual_ipaddress {
                    169.254.0.1/24 dev eth0
                }
                virtual_ipaddress_excluded {
                    192.168.1.0/24 dev eth1
                }
                virtual_routes {
                    0.0.0.0/0 via 192.168.1.1 dev eth1
                }
            }
            vrrp_instance VR_2 {
                state MASTER
                interface eth4
                virtual_router_id 2
                priority 50
                garp_master_delay 60
                mcast_src_ip 224.0.0.1
                track_interface {
                    eth4
                }
                virtual_ipaddress {
                    169.254.0.2/24 dev eth4
                }
                virtual_ipaddress_excluded {
                    192.168.2.0/24 dev eth2
                    192.168.3.0/24 dev eth6
                    192.168.55.0/24 dev eth10
                }
            }""")

        self.assertEqual(expected, config.get_config_str())

    def test_build_config_no_vips(self):
        """A bare instance renders only its primary VIP block."""
        expected = textwrap.dedent("""\
            vrrp_instance VR_1 {
                state MASTER
                interface eth0
                virtual_router_id 1
                priority 50
                garp_master_delay 60
                virtual_ipaddress {
                    169.254.0.1/24 dev eth0
                }
            }""")
        instance = keepalived.KeepalivedInstance(
            'MASTER', 'eth0', 1, ['169.254.192.0/18'])
        self.assertEqual(expected, '\n'.join(instance.build_config()))
class KeepalivedVipAddressTestCase(base.BaseTestCase):

    def test_vip_with_scope(self):
        """A scoped VIP renders '<cidr> dev <iface> scope <scope>'."""
        address = keepalived.KeepalivedVipAddress(
            'fe80::3e97:eff:fe26:3bfa/64', 'eth1', 'link')
        rendered = address.build_config()
        self.assertEqual('fe80::3e97:eff:fe26:3bfa/64 dev eth1 scope link',
                         rendered)

    def test_add_vip_idempotent(self):
        """Re-adding the same CIDR must not create a duplicate VIP entry."""
        instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
                                                 ['169.254.192.0/18'])
        instance.add_vip('192.168.222.1/32', 'eth11', None)
        # Same address, different device and scope: still a duplicate.
        instance.add_vip('192.168.222.1/32', 'eth12', 'link')
        self.assertEqual(1, len(instance.vips))
class KeepalivedVirtualRouteTestCase(base.BaseTestCase):

    def test_virtual_route_with_dev(self):
        """A route with a device appends a 'dev' suffix."""
        route = keepalived.KeepalivedVirtualRoute(
            n_consts.IPv4_ANY, '1.2.3.4', 'eth0')
        expected = '0.0.0.0/0 via 1.2.3.4 dev eth0'
        self.assertEqual(expected, route.build_config())

    def test_virtual_route_without_dev(self):
        """A route without a device omits the 'dev' suffix."""
        route = keepalived.KeepalivedVirtualRoute('50.0.0.0/8', '1.2.3.4')
        self.assertEqual('50.0.0.0/8 via 1.2.3.4', route.build_config())
| |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import optparse
import os
import shlex
import sys
from telemetry.core import browser_finder
from telemetry.core import profile_types
from telemetry.core import util
from telemetry.core import wpr_modes
from telemetry.core.platform.profiler import profiler_finder
util.AddDirToPythonPath(
util.GetChromiumSrcDir(), 'third_party', 'webpagereplay')
import net_configs # pylint: disable=F0401
class BrowserFinderOptions(optparse.Values):
  """Options to be used for discovering a browser.

  Holds browser-selection settings (type, executable, device, remote) plus a
  nested BrowserOptions with launch-time settings. CreateParser() returns an
  optparse parser whose parse_args() fills this object in.
  """

  def __init__(self, browser_type=None):
    optparse.Values.__init__(self)
    self.browser_type = browser_type
    self.browser_executable = None
    self.chrome_root = None
    self.android_device = None
    self.cros_ssh_identity = None

    self.extensions_to_load = []

    # If set, copy the generated profile to this path on exit.
    self.output_profile_path = None

    self.cros_remote = None

    self.profiler = None
    self.verbosity = 0

    self.browser_options = BrowserOptions()
    self.output_file = None

    self.skip_navigate_on_repeat = False
    self.android_rndis = False

  def __repr__(self):
    return str(sorted(self.__dict__.items()))

  def Copy(self):
    """Return a deep copy so mutations do not leak between runs."""
    return copy.deepcopy(self)

  def CreateParser(self, *args, **kwargs):
    """Build an optparse parser; its parse_args() populates this object."""
    parser = optparse.OptionParser(*args, **kwargs)

    # Selection group
    group = optparse.OptionGroup(parser, 'Which browser to use')
    group.add_option('--browser',
        dest='browser_type',
        default=None,
        help='Browser type to run, '
        'in order of priority. Supported values: list,%s' %
        ','.join(browser_finder.ALL_BROWSER_TYPES))
    group.add_option('--browser-executable',
        dest='browser_executable',
        help='The exact browser to run.')
    group.add_option('--chrome-root',
        dest='chrome_root',
        help='Where to look for chrome builds.'
        'Defaults to searching parent dirs by default.')
    group.add_option('--device',
        dest='android_device',
        help='The android device ID to use'
        'If not specified, only 0 or 1 connected devcies are supported.')
    group.add_option(
        '--remote',
        dest='cros_remote',
        help='The IP address of a remote ChromeOS device to use.')
    # Default the ssh identity to chromite's well-known testing key when it
    # is present in the checkout.
    identity = None
    testing_rsa = os.path.join(
        util.GetChromiumSrcDir(),
        'third_party', 'chromite', 'ssh_keys', 'testing_rsa')
    if os.path.exists(testing_rsa):
      identity = testing_rsa
    group.add_option('--identity',
        dest='cros_ssh_identity',
        default=identity,
        help='The identity file to use when ssh\'ing into the ChromeOS device')
    parser.add_option_group(group)

    # Debugging options
    group = optparse.OptionGroup(parser, 'When things go wrong')
    profiler_choices = profiler_finder.GetAllAvailableProfilers()
    group.add_option(
        '--profiler', default=None, type='choice',
        choices=profiler_choices,
        help='Record profiling data using this tool. Supported values: ' +
             ', '.join(profiler_choices))
    group.add_option(
        '--interactive', dest='interactive', action='store_true',
        help='Let the user interact with the page; the actions specified for '
             'the page are not run.')
    group.add_option(
        '-v', '--verbose', action='count', dest='verbosity',
        help='Increase verbosity level (repeat as needed)')
    group.add_option('--print-bootstrap-deps',
        action='store_true',
        help='Output bootstrap deps list.')
    parser.add_option_group(group)

    # Platform options
    group = optparse.OptionGroup(parser, 'Platform options')
    group.add_option('--no-performance-mode', action='store_true',
        help='Some platforms run on "full performance mode" where the '
        'test is executed at maximum CPU speed in order to minimize noise '
        '(specially important for dashboards / continuous builds). '
        'This option prevents Telemetry from tweaking such platform settings.')
    group.add_option('--android-rndis', dest='android_rndis', default=False,
        action='store_true', help='Use RNDIS forwarding on Android.')
    group.add_option('--no-android-rndis', dest='android_rndis',
        action='store_false', help='Do not use RNDIS forwarding on Android.'
        ' [default]')
    parser.add_option_group(group)

    # Browser options.
    self.browser_options.AddCommandLineArgs(parser)

    real_parse = parser.parse_args
    def ParseArgs(args=None):
      # Seed self with parser defaults, but never clobber values that were
      # already set explicitly on this object.
      defaults = parser.get_default_values()
      for k, v in defaults.__dict__.items():
        if k in self.__dict__ and self.__dict__[k] is not None:
          continue
        self.__dict__[k] = v
      ret = real_parse(args, self) # pylint: disable=E1121

      if self.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
      elif self.verbosity:
        logging.getLogger().setLevel(logging.INFO)
      else:
        logging.getLogger().setLevel(logging.WARNING)

      if self.browser_executable and not self.browser_type:
        self.browser_type = 'exact'
      if self.browser_type == 'list':
        try:
          types = browser_finder.GetAllAvailableBrowserTypes(self)
        # Fix: 'except X, ex' is Python-2-only syntax; 'as' works on 2.6+/3.
        except browser_finder.BrowserFinderException as ex:
          sys.stderr.write('ERROR: ' + str(ex))
          sys.exit(1)
        sys.stdout.write('Available browsers:\n')
        sys.stdout.write('  %s\n' % '\n  '.join(types))
        sys.exit(0)

      # Parse browser options.
      self.browser_options.UpdateFromParseResults(self)

      return ret
    parser.parse_args = ParseArgs
    return parser

  def AppendExtraBrowserArgs(self, args):
    """Forward extra command-line args to the nested BrowserOptions."""
    self.browser_options.AppendExtraBrowserArgs(args)

  def MergeDefaultValues(self, defaults):
    """Adopt any attribute from *defaults* that is not already set."""
    for k, v in defaults.__dict__.items():
      self.ensure_value(k, v)
class BrowserOptions(object):
  """Options to be used for launching a browser."""

  def __init__(self):
    # Browser type as chosen by the finder (e.g. 'exact', 'system').
    self.browser_type = None
    self.show_stdout = False
    # When set to True, the browser will use the default profile.  Telemetry
    # will not provide an alternate profile directory.
    self.dont_override_profile = False
    self.profile_dir = None
    self.profile_type = None
    self._extra_browser_args = set()
    self.extra_wpr_args = []
    self.wpr_mode = wpr_modes.WPR_OFF
    self.netsim = None
    self.no_proxy_server = False
    self.browser_user_agent_type = None
    # NOTE(review): attribute name has a typo ('sytem'); kept as-is because
    # external callers reference it by this exact name.
    self.clear_sytem_cache_for_browser_and_profile_on_start = False
    self.startup_url = None
    # Background pages of built-in component extensions can interfere with
    # performance measurements.
    self.disable_component_extensions_with_background_pages = True
    # Whether to use the new code path for choosing an ephemeral port for
    # DevTools. The bots set this to true. When Chrome 37 reaches stable,
    # remove this setting and the old code path. http://crbug.com/379980
    self.use_devtools_active_port = False

  def __repr__(self):
    return str(sorted(self.__dict__.items()))

  @classmethod
  def AddCommandLineArgs(cls, parser):
    """Register browser-launch options on *parser* (an OptionParser)."""
    ############################################################################
    # Please do not add any more options here without first discussing with    #
    # a telemetry owner. This is not the right place for platform-specific     #
    # options.                                                                 #
    ############################################################################
    group = optparse.OptionGroup(parser, 'Browser options')
    profile_choices = profile_types.GetProfileTypes()
    group.add_option('--profile-type',
        dest='profile_type',
        type='choice',
        default='clean',
        choices=profile_choices,
        help=('The user profile to use. A clean profile is used by default. '
              'Supported values: ' + ', '.join(profile_choices)))
    group.add_option('--profile-dir',
        dest='profile_dir',
        help='Profile directory to launch the browser with. '
             'A clean profile is used by default')
    group.add_option('--extra-browser-args',
        dest='extra_browser_args_as_string',
        help='Additional arguments to pass to the browser when it starts')
    group.add_option('--extra-wpr-args',
        dest='extra_wpr_args_as_string',
        help=('Additional arguments to pass to Web Page Replay. '
              'See third_party/webpagereplay/replay.py for usage.'))
    group.add_option('--netsim', default=None, type='choice',
        choices=net_configs.NET_CONFIG_NAMES,
        help=('Run benchmark under simulated network conditions. '
              'Will prompt for sudo. Supported values: ' +
              ', '.join(net_configs.NET_CONFIG_NAMES)))
    group.add_option('--show-stdout',
        action='store_true',
        help='When possible, will display the stdout of the process')
    # This hidden option is to be removed, and the older code path deleted,
    # once Chrome 37 reaches Stable. http://crbug.com/379980
    group.add_option('--use-devtools-active-port',
        action='store_true',
        help=optparse.SUPPRESS_HELP)
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Compatibility options')
    group.add_option('--gtest_output',
        help='Ignored argument for compatibility with runtest.py harness')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Synthetic gesture options')
    synthetic_gesture_source_type_choices = [ 'default', 'mouse', 'touch' ]
    group.add_option('--synthetic-gesture-source-type',
        dest='synthetic_gesture_source_type',
        default='default', type='choice',
        choices=synthetic_gesture_source_type_choices,
        help='Specify the source type for synthetic gestures. Note that some ' +
             'actions only support a specific source type. ' +
             'Supported values: ' +
             ', '.join(synthetic_gesture_source_type_choices))
    parser.add_option_group(group)

  def UpdateFromParseResults(self, finder_options):
    """Copies our options from finder_options"""
    # Move the browser-scoped attributes off the finder options object and
    # onto self, then delete them from finder_options.
    browser_options_list = [
        'extra_browser_args_as_string',
        'extra_wpr_args_as_string',
        'netsim',
        'profile_dir',
        'profile_type',
        'show_stdout',
        'synthetic_gesture_source_type',
        'use_devtools_active_port',
        ]
    for o in browser_options_list:
      a = getattr(finder_options, o, None)
      if a is not None:
        setattr(self, o, a)
        delattr(finder_options, o)

    self.browser_type = finder_options.browser_type

    # Shell-split the raw --extra-browser-args / --extra-wpr-args strings.
    if hasattr(self, 'extra_browser_args_as_string'): # pylint: disable=E1101
      tmp = shlex.split(
        self.extra_browser_args_as_string) # pylint: disable=E1101
      self.AppendExtraBrowserArgs(tmp)
      delattr(self, 'extra_browser_args_as_string')
    if hasattr(self, 'extra_wpr_args_as_string'): # pylint: disable=E1101
      tmp = shlex.split(
        self.extra_wpr_args_as_string) # pylint: disable=E1101
      self.extra_wpr_args.extend(tmp)
      delattr(self, 'extra_wpr_args_as_string')
    if self.profile_type == 'default':
      self.dont_override_profile = True

    # --profile-dir and --profile-type are mutually exclusive.
    if self.profile_dir and self.profile_type != 'clean':
      logging.critical(
          "It's illegal to specify both --profile-type and --profile-dir.\n"
          "For more information see: http://goo.gl/ngdGD5")
      sys.exit(1)

    if self.profile_dir and not os.path.isdir(self.profile_dir):
      logging.critical(
          "Directory specified by --profile-dir (%s) doesn't exist "
          "or isn't a directory.\n"
          "For more information see: http://goo.gl/ngdGD5" % self.profile_dir)
      sys.exit(1)

    if not self.profile_dir:
      self.profile_dir = profile_types.GetProfileDir(self.profile_type)

    # This deferred import is necessary because browser_options is imported in
    # telemetry/telemetry/__init__.py.
    from telemetry.core.backends.chrome import chrome_browser_options
    finder_options.browser_options = (
        chrome_browser_options.CreateChromeBrowserOptions(self))

  @property
  def extra_browser_args(self):
    # Exposed read-only; mutate via AppendExtraBrowserArgs().
    return self._extra_browser_args

  def AppendExtraBrowserArgs(self, args):
    """Add one arg (string) or several (list) to the extra-args set."""
    if isinstance(args, list):
      self._extra_browser_args.update(args)
    else:
      self._extra_browser_args.add(args)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import base64
import json
import os
import socket
import struct
import uuid
import time
from hashlib import md5 as MD5
from binascii import crc32
from random import Random
from core.err_code import err_desc_en, err_desc_ch
from utils.timeUtil import get_current_time
# Distro release files probed by getPlatform().
DEBIAN_VERSION_FILE = "/etc/debian_version"
CENTOS_VERSION_FILE = "/etc/centos-release"
REDHAT_VERSION_FILE = "/etc/redhat-release"

# Platform tags returned by getPlatform() / compared by isPlatformDebian().
PLATFORM_DEBIAN = "debian7"
PLATFORM_REDCENT6 = "redcent6"
PLATFORM_REDCENT7 = "redcent7"
def random_str(randomlength=8):
    """Return a random alphanumeric string of the requested length.

    :param randomlength: number of characters to generate (default 8)
    :return: string drawn from [A-Za-z0-9]

    NOTE: uses ``random.Random`` — not suitable for security tokens; use the
    ``secrets`` module for anything security-sensitive.
    """
    # The original accumulated into a variable named ``str`` (shadowing the
    # builtin) with O(n^2) concatenation; join over choice() is equivalent.
    chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    rng = Random()
    return ''.join(rng.choice(chars) for _ in range(randomlength))
def CRC32(crcStr):
    """Return the CRC-32 of a text string as an unsigned 32-bit integer."""
    # Masking keeps the result non-negative on all Python versions.
    return 0xFFFFFFFF & crc32(crcStr.encode())
def listFiles(fileDir, keyword=None):
    """List names of regular files directly under *fileDir* (not recursive).

    :param fileDir: directory to scan
    :param keyword: if given, only names containing this substring are kept
    :return: list of file names (bare names, not full paths)
    """
    fileList = []
    for name in os.listdir(fileDir):
        # Bug fix: the directory test must use the joined path; a bare name
        # would be resolved against the current working directory instead.
        if os.path.isdir(os.path.join(fileDir, name)):
            continue
        if keyword and name.find(keyword) == -1:
            continue
        fileList.append(name)
    return fileList
def getPlatform():
    """Return a short tag identifying the host distribution.

    Debian hosts yield "debian<major>"; CentOS/RedHat hosts yield
    "redcent<major>", matching PLATFORM_REDCENT6 / PLATFORM_REDCENT7.
    """
    if os.path.exists(DEBIAN_VERSION_FILE):
        # /etc/debian_version holds e.g. "7.8" -> major version "7".
        with open(DEBIAN_VERSION_FILE, "r") as fd:
            version = fd.readline().split(".")[0]
        return "debian" + version

    if os.path.exists(CENTOS_VERSION_FILE):
        filePath = CENTOS_VERSION_FILE
    else:
        filePath = REDHAT_VERSION_FILE
    # Release line looks like "CentOS release 6.5 (Final)": take the last
    # space-separated token before the first dot.
    with open(filePath, "r") as fd:
        version = fd.readline().split(".")[0].split(" ")[-1]
    # Bug fix: was "readcent" + version, which could never equal the
    # PLATFORM_REDCENT6/PLATFORM_REDCENT7 constants ("redcent...").
    return "redcent" + version
def isPlatformDebian():
    """True when the detected platform tag equals PLATFORM_DEBIAN."""
    platform_tag = getPlatform()
    return platform_tag == PLATFORM_DEBIAN
def ip2long(ip):
    """Convert a dotted-quad IPv4 string to its unsigned integer value."""
    raw = socket.inet_aton(ip)
    # Network byte order (big-endian) unsigned long.
    (value,) = struct.unpack("!L", raw)
    return value
def removeFile(filepath):
    """Delete *filepath*; silently ignore None or non-existent paths."""
    if filepath is None:
        return
    if not os.path.exists(filepath):
        return
    os.remove(filepath)
def buildRetMsg(errorCode, data=None, errorLog=None):
    """Pack a return tuple; the log is included only when non-empty."""
    if errorLog:
        return (errorCode, data, errorLog)
    return (errorCode, data)
def buildRetObj(errorCode, data=None, errorLog=""):
    """Pack a standard result dict with RetCode/RetObj/ErrorLog keys."""
    result = {"RetCode": errorCode}
    result["RetObj"] = data
    result["ErrorLog"] = errorLog
    return result
def toString(src, encoding="utf-8"):
    """Encode a text string to bytes; non-str values pass through unchanged.

    :param src: value to convert
    :param encoding: target codec (default utf-8)
    """
    if type(src) != str:
        return src
    try:
        return src.encode(encoding)
    except:
        # Fall back to coercing through octUnicode() before encoding.
        return octUnicode(src).encode(encoding)
def transToObj(string):
    """Parse a JSON document (str or bytes) into a Python object.

    :param string: JSON text; None and strings shorter than 2 chars yield None
    :return: parsed object, None for empty input, {} on parse failure
    """
    if string is None:
        return None
    # The original called .encode() on non-str input, which crashed for
    # bytes; decode instead so bytes payloads parse like text.
    if isinstance(string, bytes):
        string = string.decode("utf-8")
    if len(string) < 2:
        return None
    try:
        # Bug fix: json.loads(..., encoding=...) raises TypeError on
        # Python >= 3.9, so the bare except made this ALWAYS return {}.
        obj = json.loads(string)
    except Exception:
        obj = {}
    return obj
def tryToDump(string):
    """Pretty-print a JSON string (sorted keys, 4-space indent).

    Returns {} for None or too-short input; non-JSON strings are dumped as
    plain JSON string literals.
    """
    if string is None:
        return {}
    if type(string) != str:
        string = string.encode()
    if len(string) < 2:
        return {}
    try:
        parsed = json.loads(string)
    except:
        parsed = string
    return json.dumps(parsed, sort_keys=True, indent=4)
def getStrTime(milisecs):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(milisecs) / 1000))
def isSystemWindows():
    """True when the interpreter is running on Windows."""
    import platform
    return platform.system() == "Windows"
def isSystemMac():
    """True when the interpreter is running on macOS (Darwin)."""
    import platform
    return platform.system() == "Darwin"
def transToStr(obj, indent=False):
    """Serialize *obj* to JSON text; falsy *indent* means compact output."""
    if not indent:
        return json.dumps(obj, ensure_ascii=False)
    return json.dumps(obj, ensure_ascii=False, indent=indent)
def oct_trim(inStr):
    """Collapse runs of spaces to single spaces and drop leading/trailing ones.

    Only the space character is collapsed (tabs etc. are preserved inside
    segments), matching the original split-on-' ' behavior.
    """
    # join over the non-empty segments replaces the original O(n^2)
    # string concatenation and the trailing-space + rstrip dance.
    segs = [seg for seg in inStr.split(" ") if seg]
    return " ".join(segs)
def OCT_SYSTEM(formatStr, arg=None):
    """Run a shell command and capture its stdout through a temp file.

    :param formatStr: command template (optionally with %-placeholders)
    :param arg: value(s) interpolated into formatStr when given
    :return: (exit_status, stdout_text_or_None)

    NOTE(review): the command is interpolated into a shell string and run via
    os.system(), so callers must never feed it untrusted input; the
    predictable /tmp path is also race-prone. subprocess with a pipe would be
    safer.
    """
    TEMPFILE_NAME = "/tmp/OCTTEMP_FILE_%ld%s" % (get_current_time(), getUuid())
    if (arg):
        CMD = formatStr % arg
    else:
        CMD = formatStr
    # Redirect stdout into the temp file so it can be read back below.
    CMD += " > %s" % (TEMPFILE_NAME)
    ret = os.system(CMD)
    fp = open(TEMPFILE_NAME, 'r')
    # NOTE(review): open() raises on failure, so this None check is dead code.
    if (fp == None):
        return (ret >> 8 & 0XFF, None)
    data = fp.read()
    fp.close()
    os.remove(TEMPFILE_NAME)
    if (len(data) == 0):
        return (ret >> 8 & 0XFF, None)
    if (data[-1] == '\n'):
        data = data[:-1] # to remove last "\n"
    if (len(data) == 0):
        data = None
    # os.system() returns the raw wait status; shift to get the exit code.
    return (ret >> 8 & 0XFF, data)
def OCT_PIPERUN(cmd):
    """Run *cmd* (whitespace-split, no shell) and capture its stdout.

    :param cmd: command line as a single string, or None for a no-op
    :return: (returncode, stdout_bytes); (0, None) when cmd is None
    """
    import subprocess
    if cmd is None:
        return (0, None)
    args = cmd.split()
    p = subprocess.Popen(args, close_fds=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=False)
    # Bug fix: wait()-then-read can deadlock once the child fills a pipe
    # buffer; communicate() drains both stdout and stderr while waiting.
    out, _err = p.communicate()
    return (p.returncode, out)
def getUuid(spilt=None):
    """Return a fresh UUID4 string.

    NOTE: the parameter name 'spilt' (sic) is kept for backward
    compatibility. A truthy value yields the dashed canonical form;
    otherwise the bare 32-char hex form is returned.
    """
    new_id = uuid.uuid4()
    if spilt:
        return str(new_id)
    return new_id.hex
def allocVmMac(vmId, nicId):
    """Derive a stable MAC address (52:54:xx:...) from a vmId/nicId pair.

    The same (vmId, nicId) always yields the same MAC; octets are sampled
    from the MD5 digest of "vmId/nicId".
    """
    digest = MD5(("%s/%s" % (vmId, nicId)).encode()).hexdigest()
    octets = tuple(digest[i:i + 2] for i in (0, 4, 8, 12))
    return "52:54:%s:%s:%s:%s" % octets
def trimUuid(uuid):
    """Strip the dashes from a canonical 5-segment UUID.

    Input without exactly five dash-separated segments is returned unchanged.
    """
    segs = uuid.split("-")
    if len(segs) != 5:
        return uuid
    # Slice at the canonical dash positions (8-4-4-4-12 layout).
    parts = (uuid[0:8], uuid[9:13], uuid[14:18], uuid[19:23], uuid[24:36])
    return "".join(parts)
def expandUuid(uuid):
    """Insert dashes into a 32-char hex UUID (8-4-4-4-12 layout).

    Input that already carries a dash at position 8 is returned unchanged.
    """
    if uuid[8] == '-':
        return uuid
    pieces = (uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])
    return "-".join(pieces)
def jsonStringFormat(objString):
    """Pretty-print JSON given either a JSON string or a Python object.

    On serialization failure the original (string form of the) input is
    returned instead.
    """
    if type(objString) == str:
        obj = transToObj(objString)
        fallback = objString
    else:
        obj = objString
        fallback = transToStr(objString)
    try:
        return json.dumps(obj, sort_keys=True, indent=2)
    except:
        return fallback
def octUnicode(src):
    """Best-effort conversion to text: decode non-str input as UTF-8.

    Values that cannot be decoded (or are not bytes-like) are returned
    unchanged.
    """
    if type(src) != str:
        try:
            return str(src, "utf-8")
        except:
            return src
    return src
def fileToObj(filePath):
    """Read a UTF-8 file and parse its contents as JSON.

    :param filePath: path to the file
    :return: parsed object (via transToObj), or None when the file is missing
    """
    if not os.path.exists(filePath):
        print(("file %s not exist" % (filePath)))
        return None
    # Context manager guarantees the handle is closed even if parsing fails.
    # The old `if (not fd)` check was dead code: open() raises on failure
    # instead of returning a falsy handle.
    with open(filePath, "r", encoding="utf-8") as fd:
        return transToObj(fd.read())
def getErrorMsgCN(error):
    """Chinese description for *error*, or '' when unknown/empty."""
    desc = err_desc_ch.get(error)
    return desc if desc else ""
def getErrorMsg(error):
    """English description for *error*, or '' when unknown/empty."""
    desc = err_desc_en.get(error)
    return desc if desc else ""
def isValidJson(string):
    """Return True when *string* parses as JSON.

    :param string: candidate JSON text (None yields False)
    """
    if string is None:
        return False
    try:
        # Security/correctness fix: the original used eval(), which executes
        # arbitrary Python from untrusted input and does not actually accept
        # JSON (e.g. it rejects `true`/`false`/`null`).
        json.loads(string)
    except Exception:
        return False
    return True
def format_path_net(path):
    """Normalize a 'host:dir' network path into 'host:/dir/'.

    Spaces are stripped, the directory part is wrapped in single slashes,
    and runs of consecutive '/' are collapsed. None passes through.
    """
    if path is None:
        return None
    path = path.replace(' ', '')
    # Note: input without a ':' raises IndexError, same as before.
    parts = path.split(':')
    combined = parts[0] + ':' + '/' + parts[1] + '/'
    # Collapse any run of slashes down to a single one.
    while '//' in combined:
        combined = combined.replace('//', '/')
    return combined
def get_pid_by_process_name(name):
    """Return the first PID whose ps entry word-matches *name*, else None.

    NOTE(review): *name* is interpolated into a shell pipeline — do not pass
    untrusted input.
    """
    ret, data = OCT_SYSTEM('ps -ae | grep -w %s' % name)
    if ret != 0:
        return None
    return data.split()[0]
def b64_decode(src):
    """Decode a base64 text string back to text; '' for falsy input."""
    if src:
        return base64.b64decode(src.encode()).decode()
    return ""
def b64_encode(src):
    """Base64-encode a text string, returning text; '' for falsy input."""
    if src:
        return base64.b64encode(src.encode()).decode()
    return ""
| |
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import functools
import random
import re
import threading
import time
import timeit
from collections import defaultdict
from types import TracebackType
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
from elasticapm.conf import constants
from elasticapm.conf.constants import LABEL_RE, SPAN, TRANSACTION
from elasticapm.context import init_execution_context
from elasticapm.metrics.base_metrics import Timer
from elasticapm.utils import compat, encoding, get_name_from_func, nested_key, url_to_destination_resource
from elasticapm.utils.disttracing import TraceParent, TracingOptions
from elasticapm.utils.logging import get_logger
from elasticapm.utils.time import time_to_perf_counter
# Public helper API re-exported from this module.
__all__ = ("capture_span", "label", "set_transaction_name", "set_custom_context", "set_user_context")

error_logger = get_logger("elasticapm.errors")
logger = get_logger("elasticapm.traces")

# Timer used for span/transaction duration measurements.
_time_func = timeit.default_timer

# Context-local storage that tracks the active transaction and span stack.
execution_context = init_execution_context()

# A parent/child can be either a real Span or a DroppedSpan placeholder.
SpanType = Union["Span", "DroppedSpan"]
class ChildDuration(object):
    """Accumulates the wall-clock time during which *obj* had at least one
    running child, using a nesting counter so overlapping children are not
    double-counted."""

    __slots__ = ("obj", "_nesting_level", "_start", "_duration", "_lock")

    def __init__(self, obj: "BaseSpan"):
        self.obj = obj
        self._nesting_level: int = 0
        self._start: float = 0
        self._duration: float = 0
        self._lock = threading.Lock()

    def start(self, timestamp: float):
        """Record that a child began; only the outermost start is kept."""
        with self._lock:
            if self._nesting_level == 0:
                self._start = timestamp
            self._nesting_level += 1

    def stop(self, timestamp: float):
        """Record that a child ended; closing the outermost child adds the
        covered interval to the accumulated duration."""
        with self._lock:
            self._nesting_level -= 1
            if not self._nesting_level:
                self._duration += timestamp - self._start

    @property
    def duration(self) -> float:
        """Total time with at least one child running."""
        return self._duration
class BaseSpan(object):
    """Shared state and behaviour for Transaction and Span: labels, outcome,
    timing, child-duration tracking and the span-compression buffer."""

    def __init__(self, labels=None, start=None):
        self._child_durations = ChildDuration(self)
        self.labels = {}
        self.outcome: Optional[str] = None
        # Most recent compression-eligible child, held back until we know
        # whether the next sibling can be merged into it.
        self.compression_buffer: Optional[Union[Span, DroppedSpan]] = None
        self.compression_buffer_lock = threading.Lock()
        # `start` is an epoch timestamp; convert it onto the perf-counter
        # clock used for duration arithmetic.
        self.start_time: float = time_to_perf_counter(start) if start is not None else _time_func()
        self.ended_time: Optional[float] = None
        self.duration: Optional[float] = None
        if labels:
            self.label(**labels)

    def child_started(self, timestamp):
        # A direct child began running; feeds the child-duration accumulator.
        self._child_durations.start(timestamp)

    def child_ended(self, child: SpanType):
        # Span-compression bookkeeping: a finished child is either reported
        # immediately (not eligible), buffered (waiting for a possible merge
        # with the next sibling), or merged into the buffered child.
        with self.compression_buffer_lock:
            if not child.is_compression_eligible():
                if self.compression_buffer:
                    self.compression_buffer.report()
                    self.compression_buffer = None
                child.report()
            elif self.compression_buffer is None:
                self.compression_buffer = child
            elif not self.compression_buffer.try_to_compress(child):
                self.compression_buffer.report()
                self.compression_buffer = child

    def end(self, skip_frames: int = 0, duration: Optional[float] = None):
        """Mark this span/transaction as ended; an explicit *duration*
        overrides the measured one. Flushes the compression buffer."""
        self.ended_time = _time_func()
        self.duration = duration if duration is not None else (self.ended_time - self.start_time)
        if self.compression_buffer:
            self.compression_buffer.report()
            self.compression_buffer = None

    def to_dict(self) -> dict:
        # Implemented by Transaction/Span.
        raise NotImplementedError()

    def label(self, **labels):
        """
        Label this span with one or multiple key/value labels. Keys should be strings, values can be strings, booleans,
        or numerical values (int, float, Decimal)

            span_obj.label(key1="value1", key2=True, key3=42)

        Note that keys will be dedotted, replacing dot (.), star (*) and double quote (") with an underscore (_)

        :param labels: key/value pairs of labels
        :return: None
        """
        labels = encoding.enforce_label_format(labels)
        self.labels.update(labels)

    def set_success(self):
        """Set the outcome to "success"."""
        self.outcome = constants.OUTCOME.SUCCESS

    def set_failure(self):
        """Set the outcome to "failure"."""
        self.outcome = constants.OUTCOME.FAILURE

    @staticmethod
    def get_dist_tracing_id() -> str:
        # 64 random bits rendered as 16 hex chars.
        return "%016x" % random.getrandbits(64)

    @property
    def tracer(self) -> "Tracer":
        # Implemented by Transaction/Span.
        raise NotImplementedError()
class Transaction(BaseSpan):
    def __init__(
        self,
        tracer: "Tracer",
        transaction_type: str = "custom",
        trace_parent: Optional[TraceParent] = None,
        is_sampled: bool = True,
        start: Optional[float] = None,
        sample_rate: Optional[float] = None,
    ):
        """
        tracer
            Tracer object
        transaction_type
            Transaction type
        trace_parent
            TraceParent object representing the parent trace and trace state
        is_sampled
            Whether or not this transaction is sampled
        start
            Optional start timestamp. This is expected to be an epoch timestamp
            in seconds (such as from `time.time()`). If it is not, it's recommended
            that a `duration` is passed into the `end()` method.
        sample_rate
            Sample rate which was used to decide whether to sample this transaction.
            This is reported to the APM server so that unsampled transactions can
            be extrapolated.
        """
        self.id = self.get_dist_tracing_id()
        if not trace_parent:
            # No incoming distributed-tracing header: start a brand new trace
            # with a fresh 128-bit trace id rooted at this transaction.
            trace_parent = TraceParent(
                constants.TRACE_CONTEXT_VERSION,
                "%032x" % random.getrandbits(128),
                self.id,
                TracingOptions(recorded=is_sampled),
            )
        self.trace_parent: TraceParent = trace_parent
        self.timestamp = start if start is not None else time.time()
        self.name: Optional[str] = None
        self.result: Optional[str] = None
        self.transaction_type = transaction_type
        self._tracer = tracer

        # Counters/accumulators used while spans start and end.
        self.dropped_spans: int = 0
        self.context: Dict[str, Any] = {}
        self._is_sampled = is_sampled
        self.sample_rate = sample_rate
        self._span_counter: int = 0
        # Self-time per (span.type, span.subtype), consumed in end().
        self._span_timers: Dict[Tuple[str, str], Timer] = defaultdict(Timer)
        self._span_timers_lock = threading.Lock()
        self._dropped_span_statistics = defaultdict(lambda: {"count": 0, "duration.sum.us": 0})
        try:
            self._breakdown = self.tracer._agent._metrics.get_metricset(
                "elasticapm.metrics.sets.breakdown.BreakdownMetricSet"
            )
        # The metricset (or the agent itself) may be absent; breakdown
        # reporting is then disabled for this transaction.
        except (LookupError, AttributeError):
            self._breakdown = None
        super(Transaction, self).__init__(start=start)
    def end(self, skip_frames: int = 0, duration: Optional[float] = None):
        """End the transaction and flush per-span self-time into the
        breakdown metricset, when one is configured."""
        super().end(skip_frames, duration)
        if self._breakdown:
            # One timer accumulated per (span.type, span.subtype) pair;
            # values are in seconds and reported in microseconds (unit="us").
            for (span_type, span_subtype), timer in compat.iteritems(self._span_timers):
                labels = {
                    "span.type": span_type,
                    "transaction.name": self.name,
                    "transaction.type": self.transaction_type,
                }
                if span_subtype:
                    labels["span.subtype"] = span_subtype
                val = timer.val
                self._breakdown.timer("span.self_time", reset_on_collect=True, unit="us", **labels).update(
                    int(val[0] * 1000000), val[1]
                )
            if self.is_sampled:
                # "app" self-time: total duration minus time spent in
                # direct children.
                self._breakdown.timer(
                    "span.self_time",
                    reset_on_collect=True,
                    unit="us",
                    **{"span.type": "app", "transaction.name": self.name, "transaction.type": self.transaction_type},
                ).update(int((self.duration - self._child_durations.duration) * 1000000))
    def _begin_span(
        self,
        name,
        span_type,
        context=None,
        leaf=False,
        labels=None,
        parent_span_id=None,
        span_subtype=None,
        span_action=None,
        sync=None,
        start=None,
    ):
        # Internal worker for begin_span(): decides between a real Span and a
        # DroppedSpan, then pushes the result onto the execution context so it
        # becomes the current span.
        parent_span = execution_context.get_span()
        tracer = self.tracer
        if parent_span and parent_span.leaf:
            # Children of leaf spans are never recorded.
            span = DroppedSpan(parent_span, leaf=True)
        elif tracer.config.transaction_max_spans and self._span_counter > tracer.config.transaction_max_spans - 1:
            # Per-transaction span budget exhausted: count and drop.
            self.dropped_spans += 1
            span = DroppedSpan(parent_span, context=context)
        else:
            span = Span(
                transaction=self,
                name=name,
                span_type=span_type or "code.custom",
                context=context,
                leaf=leaf,
                labels=labels,
                parent=parent_span,
                parent_span_id=parent_span_id,
                span_subtype=span_subtype,
                span_action=span_action,
                sync=sync,
                start=start,
            )
            span.frames = tracer.frames_collector_func()
            self._span_counter += 1
        execution_context.set_span(span)
        return span
    def begin_span(
        self,
        span_type,
        context=None,
        leaf=False,
        labels=None,
        span_subtype=None,
        span_action=None,
        sync=None,
        start=None,
    ):
        """
        Begin a new span. Delegates to _begin_span(), which handles span
        limits and leaf-span rules and makes the new span current.

        :param name: name of the span
        :param span_type: type of the span
        :param context: a context dict
        :param leaf: True if this is a leaf span
        :param labels: a flat string/string dict of labels
        :param span_subtype: sub type of the span, e.g. "postgresql"
        :param span_action: action of the span , e.g. "query"
        :param sync: indicate if the span is synchronous or not. In most cases, `None` should be used
        :param start: timestamp, mostly useful for testing
        :return: the Span object
        """
        return self._begin_span(
            name,
            span_type,
            context=context,
            leaf=leaf,
            labels=labels,
            parent_span_id=None,
            span_subtype=span_subtype,
            span_action=span_action,
            sync=sync,
            start=start,
        )
def end_span(self, skip_frames: int = 0, duration: Optional[float] = None, outcome: str = "unknown"):
    """
    End the currently active span.

    :param skip_frames: numbers of frames to skip in the stack trace
    :param duration: override duration, mostly useful for testing
    :param outcome: outcome of the span, either success, failure or unknown
    :return: the ended span
    :raises LookupError: if no span is currently active
    """
    active = execution_context.get_span()
    if active is None:
        raise LookupError()
    # A caller-provided outcome only wins while the span's own outcome is
    # still unset/unknown.
    if not active.outcome or active.outcome == "unknown":
        active.outcome = outcome
    active.end(skip_frames=skip_frames, duration=duration)
    return active
def ensure_parent_id(self) -> str:
    """If current trace_parent has no span_id, generate one, then return it.

    This is used to generate a span ID which the RUM agent will use to correlate
    the RUM transaction with the backend transaction.
    """
    if self.trace_parent.span_id == self.id:
        generated = "%016x" % random.getrandbits(64)
        self.trace_parent.span_id = generated
        logger.debug("Set parent id to generated %s", generated)
    return self.trace_parent.span_id
def to_dict(self) -> dict:
    """Serialize this transaction to the dict shape expected by the APM Server intake API.

    NOTE: mutates self.context ("tags" is written, "faas" is popped) as a
    side effect of serialization.
    """
    self.context["tags"] = self.labels
    result = {
        "id": self.id,
        "trace_id": self.trace_parent.trace_id,
        "name": encoding.keyword_field(self.name or ""),
        "type": encoding.keyword_field(self.transaction_type),
        "duration": self.duration * 1000,  # milliseconds
        "result": encoding.keyword_field(str(self.result)),
        "timestamp": int(self.timestamp * 1000000),  # microseconds
        "outcome": self.outcome,
        "sampled": self.is_sampled,
        "span_count": {"started": self._span_counter, "dropped": self.dropped_spans},
    }
    if self._dropped_span_statistics:
        # Aggregated stats for spans that were dropped, keyed by
        # (destination resource, outcome).
        result["dropped_spans_stats"] = [
            {
                "destination_service_resource": resource,
                "outcome": outcome,
                "duration": {"count": v["count"], "sum": {"us": int(v["duration.sum.us"] * 1000000)}},
            }
            for (resource, outcome), v in self._dropped_span_statistics.items()
        ]
    if self.sample_rate is not None:
        result["sample_rate"] = float(self.sample_rate)
    if self.trace_parent:
        result["trace_id"] = self.trace_parent.trace_id
        # only set parent_id if this transaction isn't the root
        if self.trace_parent.span_id and self.trace_parent.span_id != self.id:
            result["parent_id"] = self.trace_parent.span_id
    # faas context belongs top-level on the transaction
    if "faas" in self.context:
        result["faas"] = self.context.pop("faas")
    if self.is_sampled:
        result["context"] = self.context
    return result
def track_span_duration(self, span_type, span_subtype, self_duration):
    """Add a span's self-time to the per-(type, subtype) breakdown timer.

    Thread-safe: spans may end on different threads.
    """
    # TODO: once asynchronous spans are supported, we should check if the transaction is already finished
    # TODO: and, if it has, exit without tracking.
    with self._span_timers_lock:
        self._span_timers[(span_type, span_subtype)].update(self_duration)
@property
def is_sampled(self) -> bool:
    """Whether this transaction was selected for sampling."""
    return self._is_sampled
@is_sampled.setter
def is_sampled(self, is_sampled):
    """
    This should never be called in normal operation, but often is used
    for testing. We just want to make sure our sample_rate comes out correctly
    in tracestate if we set is_sampled to False.
    """
    self._is_sampled = is_sampled
    if not is_sampled:
        # An unsampled transaction must advertise a sample rate of "0" in
        # the tracestate header so downstream services agree.
        if self.sample_rate:
            self.sample_rate = "0"
            self.trace_parent.add_tracestate(constants.TRACESTATE.SAMPLE_RATE, self.sample_rate)
@property
def tracer(self) -> "Tracer":
    """The Tracer that created this transaction."""
    return self._tracer
def track_dropped_span(self, span: SpanType):
    """Fold a dropped span's duration into the per-(resource, outcome) statistics."""
    with self._span_timers_lock:
        try:
            destination = span.context["destination"]["service"]["resource"]
            entry = self._dropped_span_statistics[(destination, span.outcome)]
            entry["count"] += 1
            entry["duration.sum.us"] += span.duration
        except KeyError:
            # Span carries no destination resource -- nothing to aggregate.
            pass
class Span(BaseSpan):
    """A recorded span: one timed operation inside a transaction.

    Serializes itself for the APM Server via :meth:`to_dict` and supports
    span compression (merging adjacent similar exit spans).
    """

    # NOTE(review): __init__ also assigns self.composite, which is not listed
    # here -- presumably BaseSpan's __slots__ (or lack thereof) provides it;
    # confirm against the base class.
    __slots__ = (
        "id",
        "transaction",
        "name",
        "type",
        "subtype",
        "action",
        "context",
        "leaf",
        "dist_tracing_propagated",
        "timestamp",
        "start_time",
        "ended_time",
        "duration",
        "parent",
        "parent_span_id",
        "frames",
        "labels",
        "sync",
        "outcome",
        "_child_durations",
    )

    def __init__(
        self,
        transaction: Transaction,
        name: str,
        span_type: str,
        context: Optional[dict] = None,
        leaf: bool = False,
        labels: Optional[dict] = None,
        parent: Optional["Span"] = None,
        parent_span_id: Optional[str] = None,
        span_subtype: Optional[str] = None,
        span_action: Optional[str] = None,
        sync: Optional[bool] = None,
        start: Optional[int] = None,
    ):
        """
        Create a new Span

        :param transaction: transaction object that this span relates to
        :param name: Generic name of the span
        :param span_type: type of the span, e.g. db
        :param context: context dictionary
        :param leaf: is this span a leaf span?
        :param labels: a dict of labels
        :param parent_span_id: override of the span ID
        :param span_subtype: sub type of the span, e.g. mysql
        :param span_action: sub type of the span, e.g. query
        :param sync: indicate if the span was executed synchronously or asynchronously
        :param start: timestamp, mostly useful for testing
        """
        self.id = self.get_dist_tracing_id()
        self.transaction = transaction
        self.name = name
        self.context = context if context is not None else {}
        self.leaf = leaf
        # timestamp is bit of a mix of monotonic and non-monotonic time sources.
        # we take the (non-monotonic) transaction timestamp, and add the (monotonic) difference of span
        # start time and transaction start time. In this respect, the span timestamp is guaranteed to grow
        # monotonically with respect to the transaction timestamp
        self.parent = parent
        self.parent_span_id = parent_span_id
        self.frames = None
        self.sync = sync
        self.type = span_type
        self.subtype = span_subtype
        self.action = span_action
        self.dist_tracing_propagated = False
        self.composite: Dict[str, Any] = {}
        # BaseSpan.__init__ sets start_time (and labels); timestamp math below
        # depends on it, so keep this call before the timestamp assignment.
        super(Span, self).__init__(labels=labels, start=start)
        self.timestamp = transaction.timestamp + (self.start_time - transaction.start_time)
        if self.transaction._breakdown:
            # Tell the parent (span or transaction) that a child started, for
            # self-time breakdown accounting.
            p = self.parent if self.parent else self.transaction
            p.child_started(self.start_time)

    def to_dict(self) -> dict:
        """Serialize this span to the dict shape expected by the APM Server intake API.

        NOTE: may mutate self.context (tags and inferred destination resource
        are written into it).
        """
        if (
            self.composite
            and self.composite["compression_strategy"] == "same_kind"
            and nested_key(self.context, "destination", "service", "resource")
        ):
            # Composite "same_kind" spans get a generic name based on the target.
            name = "Calls to " + self.context["destination"]["service"]["resource"]
        else:
            name = self.name
        result = {
            "id": self.id,
            "transaction_id": self.transaction.id,
            "trace_id": self.transaction.trace_parent.trace_id,
            # use either the explicitly set parent_span_id, or the id of the parent, or finally the transaction id
            "parent_id": self.parent_span_id or (self.parent.id if self.parent else self.transaction.id),
            "name": encoding.keyword_field(name),
            "type": encoding.keyword_field(self.type),
            "subtype": encoding.keyword_field(self.subtype),
            "action": encoding.keyword_field(self.action),
            "timestamp": int(self.timestamp * 1000000),  # microseconds
            "duration": self.duration * 1000,  # milliseconds
            "outcome": self.outcome,
        }
        if self.transaction.sample_rate is not None:
            result["sample_rate"] = float(self.transaction.sample_rate)
        if self.sync is not None:
            result["sync"] = self.sync
        if self.labels:
            if self.context is None:
                self.context = {}
            self.context["tags"] = self.labels
        if self.context:
            # Infer a destination.service.resource for exit-like spans that
            # did not set one explicitly (required by the server for metrics).
            resource = nested_key(self.context, "destination", "service", "resource")
            if not resource and (self.leaf or any(k in self.context for k in ("destination", "db", "message", "http"))):
                type_info = self.subtype or self.type
                instance = nested_key(self.context, "db", "instance")
                queue_name = nested_key(self.context, "message", "queue", "name")
                http_url = nested_key(self.context, "http", "url")
                if instance:
                    resource = f"{type_info}/{instance}"
                elif queue_name:
                    resource = f"{type_info}/{queue_name}"
                elif http_url:
                    resource = url_to_destination_resource(http_url)
                else:
                    resource = type_info
                if "destination" not in self.context:
                    self.context["destination"] = {}
                if "service" not in self.context["destination"]:
                    self.context["destination"]["service"] = {}
                self.context["destination"]["service"]["resource"] = resource
                # set fields that are deprecated, but still required by APM Server API
                if "name" not in self.context["destination"]["service"]:
                    self.context["destination"]["service"]["name"] = ""
                if "type" not in self.context["destination"]["service"]:
                    self.context["destination"]["service"]["type"] = ""
            result["context"] = self.context
        if self.frames:
            result["stacktrace"] = self.frames
        if self.composite:
            result["composite"] = {
                "compression_strategy": self.composite["compression_strategy"],
                "sum": self.composite["sum"] * 1000,  # milliseconds
                "count": self.composite["count"],
            }
        return result

    def is_same_kind(self, other_span: SpanType) -> bool:
        """
        For compression purposes, two spans are considered to be of the same kind if they have the same
        values for type, subtype, and destination.service.resource

        :param other_span: another span object
        :return: bool
        """
        resource = nested_key(self.context, "destination", "service", "resource")
        return bool(
            self.type == other_span.type
            and self.subtype == other_span.subtype
            and (resource and resource == nested_key(other_span.context, "destination", "service", "resource"))
        )

    def is_exact_match(self, other_span: SpanType) -> bool:
        """
        For compression purposes, two spans are considered to be an exact match if the have the same
        name and are of the same kind.

        :param other_span: another span object
        :return: bool
        """
        return bool(self.name == other_span.name and self.is_same_kind(other_span))

    def is_compression_eligible(self) -> bool:
        """
        Determine if this span is eligible for compression.
        """
        if self.tracer.config.span_compression_enabled:
            # Only exit (leaf) spans whose id was never propagated downstream
            # and that did not fail may be merged.
            return self.leaf and not self.dist_tracing_propagated and self.outcome in (None, constants.OUTCOME.SUCCESS)
        return False

    @property
    def discardable(self) -> bool:
        """True if this span may be dropped entirely (fast, successful exit span)."""
        return self.leaf and not self.dist_tracing_propagated and self.outcome == constants.OUTCOME.SUCCESS

    def end(self, skip_frames: int = 0, duration: Optional[float] = None):
        """
        End this span and queue it for sending.

        :param skip_frames: amount of frames to skip from the beginning of the stack trace
        :param duration: override duration, mostly useful for testing
        :return: None
        """
        super().end(skip_frames, duration)
        tracer = self.transaction.tracer
        # NOTE(review): due to operator precedence this reads as
        # `not min_duration or (duration >= min_duration and frames)`; if the
        # intent was `(not min_duration or duration >= min_duration) and frames`,
        # the condition is subtly off when min_duration is unset and no frames
        # were collected -- confirm against upstream.
        if not tracer.span_frames_min_duration or self.duration >= tracer.span_frames_min_duration and self.frames:
            self.frames = tracer.frames_processing_func(self.frames)[skip_frames:]
        else:
            self.frames = None
        # Restore the parent as the active span.
        execution_context.set_span(self.parent)
        p = self.parent if self.parent else self.transaction
        if self.transaction._breakdown:
            p._child_durations.stop(self.start_time + self.duration)
            # Self time excludes time spent in child spans.
            self.transaction.track_span_duration(
                self.type, self.subtype, self.duration - self._child_durations.duration
            )
        p.child_ended(self)

    def report(self) -> None:
        """Queue this span for sending, or drop it if it is fast enough to discard."""
        if self.discardable and self.duration < self.tracer.config.exit_span_min_duration:
            self.transaction.track_dropped_span(self)
            self.transaction.dropped_spans += 1
        else:
            self.tracer.queue_func(SPAN, self.to_dict())

    def try_to_compress(self, sibling: SpanType) -> bool:
        """Try to merge `sibling` into this span; returns True on success."""
        compression_strategy = (
            self._try_to_compress_composite(sibling) if self.composite else self._try_to_compress_regular(sibling)
        )
        if not compression_strategy:
            return False
        if not self.composite:
            # First merge: seed the composite with this span's own data;
            # the increments below then account for the sibling.
            self.composite = {"compression_strategy": compression_strategy, "count": 1, "sum": self.duration}
        self.composite["count"] += 1
        self.composite["sum"] += sibling.duration
        self.duration = sibling.ended_time - self.start_time
        self.transaction._span_counter -= 1
        return True

    def _try_to_compress_composite(self, sibling: SpanType) -> Optional[str]:
        """Check whether `sibling` fits this already-composite span's strategy."""
        if self.composite["compression_strategy"] == "exact_match":
            return (
                "exact_match"
                if (
                    self.is_exact_match(sibling)
                    and sibling.duration <= self.transaction.tracer.config.span_compression_exact_match_max_duration
                )
                else None
            )
        elif self.composite["compression_strategy"] == "same_kind":
            return (
                "same_kind"
                if (
                    self.is_same_kind(sibling)
                    and sibling.duration <= self.transaction.tracer.config.span_compression_same_kind_max_duration
                )
                else None
            )
        return None

    def _try_to_compress_regular(self, sibling: SpanType) -> Optional[str]:
        """Pick a compression strategy for two plain (non-composite) spans, or None."""
        if not self.is_same_kind(sibling):
            return None
        if self.name == sibling.name:
            max_duration = self.transaction.tracer.config.span_compression_exact_match_max_duration
            if self.duration <= max_duration and sibling.duration <= max_duration:
                return "exact_match"
            return None
        max_duration = self.transaction.tracer.config.span_compression_same_kind_max_duration
        if self.duration <= max_duration and sibling.duration <= max_duration:
            return "same_kind"
        return None

    def update_context(self, key, data):
        """
        Update the context data for given key

        :param key: the key, e.g. "db"
        :param data: a dictionary
        :return: None
        """
        current = self.context.get(key, {})
        current.update(data)
        self.context[key] = current

    def __str__(self):
        return "{}/{}/{}".format(self.name, self.type, self.subtype)

    @property
    def tracer(self) -> "Tracer":
        """The Tracer that owns this span's transaction."""
        return self.transaction.tracer
class DroppedSpan(BaseSpan):
    """Placeholder for a span that was not recorded.

    Keeps the parent chain intact so nesting still works, but every
    reporting/compression operation is a no-op.
    """

    __slots__ = ("leaf", "parent", "id", "context", "outcome", "dist_tracing_propagated")

    def __init__(self, parent, leaf=False, start=None, context=None):
        self.id = None
        self.parent = parent
        self.leaf = leaf
        self.context = context
        self.dist_tracing_propagated = False
        self.outcome = constants.OUTCOME.UNKNOWN
        super(DroppedSpan, self).__init__(start=start)

    def end(self, skip_frames: int = 0, duration: Optional[float] = None):
        super().end(skip_frames, duration)
        # Hand the "active span" slot back to the parent.
        execution_context.set_span(self.parent)

    def child_started(self, timestamp):
        pass

    def child_ended(self, child: SpanType):
        pass

    def update_context(self, key, data):
        pass

    def report(self):
        pass

    def try_to_compress(self, sibling: SpanType) -> bool:
        return False

    def is_compression_eligible(self) -> bool:
        return False

    @property
    def name(self):
        return "DroppedSpan"

    @property
    def type(self):
        return None

    @property
    def subtype(self):
        return None

    @property
    def action(self):
        return None
class Tracer(object):
    """Creates transactions, applies sampling, and queues finished transactions."""

    def __init__(self, frames_collector_func, frames_processing_func, queue_func, config, agent):
        self.config = config
        self.queue_func = queue_func
        self.frames_processing_func = frames_processing_func
        self.frames_collector_func = frames_collector_func
        self._agent = agent
        # Pre-compile the ignore patterns once; matched against transaction names.
        self._ignore_patterns = [re.compile(p) for p in config.transactions_ignore_patterns or []]

    @property
    def span_frames_min_duration(self):
        """Minimum span duration (seconds) for collecting stack frames; None disables the threshold.

        Config value is in milliseconds; -1/None mean "no minimum".
        """
        if self.config.span_frames_min_duration in (-1, None):
            return None
        else:
            return self.config.span_frames_min_duration / 1000.0

    def begin_transaction(self, transaction_type, trace_parent=None, start=None):
        """
        Start a new transactions and bind it in a thread-local variable

        :param transaction_type: type of the transaction, e.g. "request"
        :param trace_parent: an optional TraceParent object
        :param start: override the start timestamp, mostly useful for testing
        :returns the Transaction object
        """
        if trace_parent:
            # Honour the upstream sampling decision from the traceparent header.
            is_sampled = bool(trace_parent.trace_options.recorded)
            # NOTE(review): may be None if upstream sent no sample-rate entry
            # in tracestate -- presumably handled downstream; confirm.
            sample_rate = trace_parent.tracestate_dict.get(constants.TRACESTATE.SAMPLE_RATE)
        else:
            # Head-based sampling decision made locally.
            is_sampled = (
                self.config.transaction_sample_rate == 1.0 or self.config.transaction_sample_rate > random.random()
            )
            if not is_sampled:
                sample_rate = "0"
            else:
                sample_rate = str(self.config.transaction_sample_rate)
        transaction = Transaction(
            self,
            transaction_type,
            trace_parent=trace_parent,
            is_sampled=is_sampled,
            start=start,
            sample_rate=sample_rate,
        )
        if trace_parent is None:
            # We are the trace root: advertise our sample rate in tracestate.
            transaction.trace_parent.add_tracestate(constants.TRACESTATE.SAMPLE_RATE, sample_rate)
        execution_context.set_transaction(transaction)
        return transaction

    def end_transaction(self, result=None, transaction_name=None, duration=None):
        """
        End the current transaction and queue it for sending

        :param result: result of the transaction, e.g. "OK" or 200
        :param transaction_name: name of the transaction
        :param duration: override duration, mostly useful for testing
        :return:
        """
        transaction = execution_context.get_transaction(clear=True)
        if transaction:
            if transaction.name is None:
                transaction.name = str(transaction_name) if transaction_name is not None else ""
            transaction.end(duration=duration)
            # Ignored transactions are still ended (timers stop), just not queued.
            if self._should_ignore(transaction.name):
                return
            if transaction.result is None:
                transaction.result = result
            self.queue_func(TRANSACTION, transaction.to_dict())
        return transaction

    def _should_ignore(self, transaction_name):
        """Return True if the name matches any configured ignore pattern."""
        for pattern in self._ignore_patterns:
            if pattern.search(transaction_name):
                return True
        return False
class capture_span(object):
    """Context manager / decorator that records a span around a block of code.

    Usage: ``with capture_span("name", "db.mysql.query"): ...`` or as a
    function decorator. No-op when there is no sampled transaction active.
    """

    __slots__ = (
        "name",
        "type",
        "subtype",
        "action",
        "extra",
        "skip_frames",
        "leaf",
        "labels",
        "duration",
        "start",
        "sync",
    )

    def __init__(
        self,
        name: Optional[str] = None,
        span_type: str = "code.custom",
        extra: Optional[dict] = None,
        skip_frames: int = 0,
        leaf: bool = False,
        labels: Optional[dict] = None,
        span_subtype: Optional[str] = None,
        span_action: Optional[str] = None,
        start: Optional[int] = None,
        duration: Optional[float] = None,
        sync: Optional[bool] = None,
    ):
        self.name = name
        if span_subtype is None and "." in span_type:
            # old style dotted type, let's split it up
            type_bits = span_type.split(".")
            if len(type_bits) == 2:
                span_type, span_subtype = type_bits[:2]
            else:
                span_type, span_subtype, span_action = type_bits[:3]
        self.type = span_type
        self.subtype = span_subtype
        self.action = span_action
        self.extra = extra
        self.skip_frames = skip_frames
        self.leaf = leaf
        self.labels = labels
        self.start = start
        self.duration = duration
        self.sync = sync

    def __call__(self, func: Callable) -> Callable:
        # Decorator form: default the span name to the decorated function's name.
        self.name = self.name or get_name_from_func(func)

        @functools.wraps(func)
        def decorated(*args, **kwds):
            with self:
                return func(*args, **kwds)

        return decorated

    def __enter__(self) -> Optional[SpanType]:
        return self.handle_enter(self.sync)

    def __exit__(
        self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
    ) -> None:
        self.handle_exit(exc_type, exc_val, exc_tb)

    def handle_enter(self, sync: bool) -> Optional[SpanType]:
        """Begin the span if a sampled transaction is active; returns the span or None."""
        transaction = execution_context.get_transaction()
        if transaction and transaction.is_sampled:
            return transaction.begin_span(
                self.name,
                self.type,
                context=self.extra,
                leaf=self.leaf,
                labels=self.labels,
                span_subtype=self.subtype,
                span_action=self.action,
                start=self.start,
                sync=sync,
            )
        return None

    def handle_exit(
        self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
    ) -> None:
        """End the span, set its outcome from any exception, and link the exception to it."""
        transaction = execution_context.get_transaction()
        if transaction and transaction.is_sampled:
            try:
                outcome = "failure" if exc_val else "success"
                span = transaction.end_span(self.skip_frames, duration=self.duration, outcome=outcome)
                # Older APM Servers don't accept dropped-span stats.
                should_track_dropped = (
                    transaction.tracer._agent.check_server_version(gte=(7, 16)) if transaction.tracer._agent else True
                )
                if should_track_dropped and isinstance(span, DroppedSpan) and span.context:
                    transaction.track_dropped_span(span)
                if exc_val and not isinstance(span, DroppedSpan):
                    try:
                        # Remember which span the exception escaped from, so the
                        # error event can be correlated to it.
                        exc_val._elastic_apm_span_id = span.id
                    except AttributeError:
                        # could happen if the exception has __slots__
                        pass
            except LookupError:
                logger.debug("ended non-existing span %s of type %s", self.name, self.type)
def label(**labels):
    """
    Labels current transaction. Keys should be strings, values can be strings, booleans,
    or numerical values (int, float, Decimal)

    :param labels: key/value map of labels
    """
    transaction = execution_context.get_transaction()
    if transaction:
        transaction.label(**labels)
    else:
        error_logger.warning("Ignored labels %s. No transaction currently active.", ", ".join(labels.keys()))
def set_transaction_name(name: str, override: bool = True) -> None:
    """
    Sets the name of the transaction

    :param name: the name of the transaction
    :param override: if set to False, the name is only set if no name has been set before
    :return: None
    """
    transaction = execution_context.get_transaction()
    if transaction is None:
        return
    # Only skip when a name exists and the caller asked not to override it.
    if override or transaction.name is None:
        transaction.name = str(name)
def set_transaction_result(result, override=True):
    """
    Sets the result of the transaction. The result could be e.g. the HTTP status class (e.g "HTTP 5xx") for
    HTTP requests, or "success"/"failure" for background tasks.

    :param result: Details of the transaction result that should be set
    :param override: if set to False, the name is only set if no name has been set before
    :return: None
    """
    transaction = execution_context.get_transaction()
    if transaction is None:
        return
    # Only skip when a result exists and the caller asked not to override it.
    if override or transaction.result is None:
        transaction.result = result
def set_transaction_outcome(outcome=None, http_status_code=None, override=True):
    """
    Set the outcome of the transaction. This should only be done at the end of a transaction
    after the outcome is determined.

    If an invalid outcome is provided, an INFO level log message will be issued.

    :param outcome: the outcome of the transaction. Allowed values are "success", "failure", "unknown". None is
        allowed if a http_status_code is provided.
    :param http_status_code: An integer value of the HTTP status code. If provided, the outcome will be determined
        based on the status code: Success if the status is lower than 500, failure otherwise.
        If both a valid outcome and an http_status_code is provided, the former is used
    :param override: If set to False, the outcome will only be updated if its current value is None
    :return: None
    """
    transaction = execution_context.get_transaction()
    if not transaction:
        return
    # A status code is only consulted when no valid outcome was given directly.
    if http_status_code and outcome not in constants.OUTCOME:
        try:
            http_status_code = int(http_status_code)
            outcome = constants.OUTCOME.SUCCESS if http_status_code < 500 else constants.OUTCOME.FAILURE
        except ValueError:
            logger.info('Invalid HTTP status %r provided, outcome set to "unknown"', http_status_code)
            outcome = constants.OUTCOME.UNKNOWN
    elif outcome not in constants.OUTCOME:
        logger.info('Invalid outcome %r provided, outcome set to "unknown"', outcome)
        outcome = constants.OUTCOME.UNKNOWN

    if outcome and (transaction.outcome is None or override):
        transaction.outcome = outcome
def get_transaction_id():
    """
    Returns the current transaction ID
    """
    transaction = execution_context.get_transaction()
    return transaction.id if transaction else None
def get_trace_parent_header():
    """
    Return the trace parent header for the current transaction.
    """
    transaction = execution_context.get_transaction()
    if transaction is None or transaction.trace_parent is None:
        return
    return transaction.trace_parent.to_string()
def get_trace_id():
    """
    Returns the current trace ID
    """
    transaction = execution_context.get_transaction()
    if not transaction:
        return
    trace_parent = transaction.trace_parent
    return trace_parent.trace_id if trace_parent else None
def get_span_id():
    """
    Returns the current span ID
    """
    span = execution_context.get_span()
    return span.id if span else None
def set_context(data, key="custom"):
    """
    Attach contextual data to the current transaction and errors that happen during the current transaction.

    If the transaction is not sampled, this function becomes a no-op.

    :param data: a dictionary, or a callable that returns a dictionary
    :param key: the namespace for this data
    """
    transaction = execution_context.get_transaction()
    if not transaction or not transaction.is_sampled:
        return
    if callable(data):
        data = data()

    # Sanitize key names in place: characters the server rejects become "_".
    for field in list(data.keys()):
        if LABEL_RE.search(field):
            data[LABEL_RE.sub("_", field)] = data.pop(field)

    if key in transaction.context:
        transaction.context[key].update(data)
    else:
        transaction.context[key] = data
# Convenience alias for the common case; key="custom" is already the default
# of set_context, the partial just gives it an explicit name.
set_custom_context = functools.partial(set_context, key="custom")
def set_user_context(username=None, email=None, user_id=None):
    """Attach user identification fields to the current transaction's "user" context.

    Only the arguments that are not None are recorded.
    """
    user = {}
    if username is not None:
        user["username"] = encoding.keyword_field(username)
    if email is not None:
        user["email"] = encoding.keyword_field(email)
    if user_id is not None:
        user["id"] = encoding.keyword_field(user_id)
    set_context(user, "user")
| |
# -*- coding: utf-8 -*-
"""
Convert between objects and flypy representations.
"""
from __future__ import print_function, division, absolute_import
import ctypes
import flypy as nb
from flypy import typing
from .representation import stack_allocate, byref, c_primitive
#===------------------------------------------------------------------===
# Object Conversion
#===------------------------------------------------------------------===
# Process-wide cache mapping flypy type instances to their ctypes
# representation (shared default memo for ctype()/toctypes()).
ctypes_type_memo = {}
def fromobject(value, type):
    """
    Convert a Python value to a flypy representation according to `type`
    (e.g. list -> List). Values that are already instances of the
    implementation class, or whose implementation defines no `fromobject`
    hook, pass through unchanged.
    """
    impl = type.impl
    needs_conversion = hasattr(impl, 'fromobject') and not isinstance(value, impl)
    if needs_conversion:
        return impl.fromobject(value, type)
    return value
def toobject(value, type):
    """
    Convert a flypy value to a Python representation (e.g. List -> list).
    Falls back to returning the value unchanged when the implementation
    class defines no `toobject` hook.
    """
    impl = type.impl
    if not hasattr(impl, 'toobject'):
        return value
    return impl.toobject(value, type)
def toctypes(value, type, keepalive, valmemo=None, typememo=None):
    """
    Convert a flypy object given as a Python value to a low-level ctypes
    representation.

    Returns (ctypes_value, keep_alive)

    `keepalive` accumulates heap-style (pointer-wrapped) ctypes values so the
    caller can keep them from being garbage collected.
    """
    from flypy.types import int8

    if hasattr(type, 'type'):
        type = type.type

    strtype = str(type)

    if valmemo is None:
        valmemo = {}
        # NOTE(review): typememo is only defaulted when valmemo is also None;
        # a caller passing valmemo without typememo leaves typememo = None
        # (ctype() then falls back to the global memo itself) -- confirm intended.
        typememo = ctypes_type_memo

    # Memoize on (object identity, type string) since the same object may be
    # converted under several types.
    if (id(value), strtype) in valmemo:
        return valmemo[id(value), strtype]

    cls = type.impl
    if hasattr(cls, 'toctypes'):
        result = cls.toctypes(value, type)
    else:
        cty = ctype(type, typememo)
        if not stack_allocate(type):
            cty = cty._type_  # Get the base type

        # Resolve types
        layout = type.resolved_layout
        if not layout:
            # Empty layout: struct carries a single dummy int8 field.
            types = [int8]
        else:
            types = [layout[name] for name, _ in cty._fields_]

        # Dereference pointer to aggregate
        if hasattr(value, 'contents'):
            value = value.contents

        # Resolve values
        values = []
        for (name, cty_field), ty in zip(cty._fields_, types):
            if hasattr(value, name):
                val = getattr(value, name)
            else:
                assert name == 'dummy', (name, value, value.__class__)
                val = 0
            # Recursively convert each field, sharing the memo tables.
            cval = toctypes(val, ty, keepalive, valmemo, typememo)
            values.append(cval)

        # Construct value from ctypes struct
        result = cty(*values)
        if not stack_allocate(type):
            # Heap-style value: keep the struct alive and hand out a pointer.
            keepalive.append(result)
            result = ctypes.pointer(result)

    valmemo[id(value), strtype] = result
    return result
def fromctypes(value, ty, memo=None):
    """
    Construct a flypy object from a ctypes representation.

    :param value: a ctypes value (struct, pointer to struct, or primitive)
    :param ty: the flypy type (or an object carrying it on `.type`)
    :param memo: reserved recursion memo (currently unused, see note below)
    :return: the reconstructed flypy-level object
    """
    from flypy.support.ctypes_support import is_ctypes_pointer_type, CTypesStruct

    if hasattr(ty, 'type'):
        ty = ty.type

    if memo is None:
        memo = {}

    # NOTE: Caching by id() doesn't work here, since ctypes values are
    # transient, so the memo is currently unused.

    cls = ty.impl
    if hasattr(cls, 'fromctypes'):
        # The implementation class knows how to rebuild itself.
        result = cls.fromctypes(value, ty)
    else:
        values = {}
        if is_ctypes_pointer_type(type(value)):
            # TODO: stack jit
            # Recover original names from the type
            cty = ctype(ty)
            value = ctypes.cast(value, cty)
        # Use items() rather than the Python-2-only iteritems(), and bind the
        # field type to its own name instead of shadowing the `ty` parameter.
        for name, field_ty in ty.resolved_layout.items():
            if is_ctypes_pointer_type(type(value)):
                value = value[0]
            cval = getattr(value, name)
            pyval = fromctypes(cval, field_ty, memo)
            values[name] = pyval
        result = cls(**values)

    return result
def ctype(type, memo=None):
    """
    Return the low-level ctypes type representation for a flypy type instance.

    Results are cached in `memo` (defaulting to the module-global
    ctypes_type_memo); the cache entry is installed *before* field types are
    resolved so that recursive/self-referential layouts terminate.

    NOTE(review): this code relies on Python 2 `zip` returning a list (the
    `zip(...) or [(), ()]` fallback and the `_fields_` assignment below would
    both misbehave with Python 3's lazy zip) -- presumably a Python-2-only
    module; confirm before porting.
    """
    # -------------------------------------------------
    # Setup cache

    if hasattr(type, 'type'):
        type = type.type

    if memo is None:
        memo = ctypes_type_memo
    if type in memo:
        return memo[type]

    # -------------------------------------------------
    # Handle custom ctype methods

    cls = type.impl
    if hasattr(cls, 'ctype'):
        result = cls.ctype(type)
        memo[type] = result
        return result

    # -------------------------------------------------
    # Build dummy struct and cache result

    class result(ctypes.Structure):
        def __repr__(self):
            return "{ %s }" % (", ".join("%s:%s" % (name, getattr(self, name))
                                         for name in names))

    # Keep a handle on the bare struct; `result` may be rebound to a pointer type.
    struct = result
    if not stack_allocate(type):
        result = ctypes.POINTER(result)
    memo[type] = result

    # -------------------------------------------------
    # Determine field ctypes

    names, types = zip(*type.resolved_layout.items()) or [(), ()]
    types = [ctype(ty, memo) for ty in types]
    if not types:
        # Empty layout: ctypes rejects empty structs, so add a dummy byte.
        names = ['dummy']
        types = [ctypes.c_int8]

    struct._fields_ = zip(names, types)
    struct.__name__ = 'CTypes' + type.__class__.__name__
    return result
def make_coercers(type):
    """
    Build coercion functions that reconstruct the values.

    Returns (topy, frompy): jitted functions that convert an instance of
    `type` to/from a generic Object by coercing each layout field.

    NOTE(review): `jit`, `unroll`, `coerce` and `Object` are not imported in
    this module -- presumably flypy DSL builtins injected elsewhere; confirm.
    """
    cls = type.impl
    # Indirection so the jitted body calls a plain callable rather than the class.
    pycls = lambda *args: cls(*args)
    layout = cls.layout

    @jit('%s -> Type[Object] -> Object' % (type,))
    def topy(obj, _):
        args = []
        # `type` here rebinds the outer name per-field (loop variable).
        for name, type in unroll(layout):
            args.append(coerce(getattr(obj, name), Object))
        return pycls(*args)

    @jit('Object -> Type[%s] -> %s' % (type, type))
    def frompy(obj, _):
        args = []
        for name, type in unroll(layout):
            args.append(coerce(getattr(obj, name), type))
        return cls(*args)

    return topy, frompy
#===------------------------------------------------------------------===
# General Type Conversion
#===------------------------------------------------------------------===
# TODO:
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Categorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal
from tensorflow.python.platform import test
def make_categorical(batch_shape, num_classes, dtype=dtypes.int32):
    """Build a Categorical with random, strongly negative logits of the given batch shape."""
    shape = list(batch_shape) + [num_classes]
    logits = random_ops.random_uniform(shape, -10, 10, dtype=dtypes.float32) - 50.
    return categorical.Categorical(logits, dtype=dtype)
class CategoricalTest(test.TestCase, parameterized.TestCase):
def testP(self):
    # Constructing from probabilities should expose both probs and logits.
    p = [0.2, 0.8]
    dist = categorical.Categorical(probs=p)
    with self.test_session():
        self.assertAllClose(p, dist.probs.eval())
        self.assertAllEqual([2], dist.logits.get_shape())
def testLogits(self):
    # Constructing from logits: probs must round-trip, logits preserved
    # exactly (the -50 shift cancels in the softmax).
    p = np.array([0.2, 0.8], dtype=np.float32)
    logits = np.log(p) - 50.
    dist = categorical.Categorical(logits=logits)
    with self.test_session():
        self.assertAllEqual([2], dist.probs.get_shape())
        self.assertAllEqual([2], dist.logits.get_shape())
        self.assertAllClose(dist.probs.eval(), p)
        self.assertAllClose(dist.logits.eval(), logits)
def testShapes(self):
    # Batch shape is taken from the logits' leading dims; event shape is scalar.
    with self.test_session():
        for batch_shape in ([], [1], [2, 3, 4]):
            dist = make_categorical(batch_shape, 10)
            self.assertAllEqual(batch_shape, dist.batch_shape)
            self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
            self.assertAllEqual([], dist.event_shape)
            self.assertAllEqual([], dist.event_shape_tensor().eval())
            self.assertEqual(10, dist.event_size.eval())
            # event_size is available as a constant because the shape is
            # known at graph build time.
            self.assertEqual(10, tensor_util.constant_value(dist.event_size))

        # Same checks with num_classes given as a graph constant instead of
        # a Python int; static batch shape keeps its rank only.
        for batch_shape in ([], [1], [2, 3, 4]):
            dist = make_categorical(
                batch_shape, constant_op.constant(
                    10, dtype=dtypes.int32))
            self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
            self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
            self.assertAllEqual([], dist.event_shape)
            self.assertAllEqual([], dist.event_shape_tensor().eval())
            self.assertEqual(10, dist.event_size.eval())
def testDtype(self):
dist = make_categorical([], 5, dtype=dtypes.int32)
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
dist = make_categorical([], 5, dtype=dtypes.int64)
self.assertEqual(dist.dtype, dtypes.int64)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
self.assertEqual(
dist.logits.dtype, dist.prob(np.array(
0, dtype=np.int64)).dtype)
self.assertEqual(
dist.logits.dtype, dist.log_prob(np.array(
0, dtype=np.int64)).dtype)
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
dist = make_categorical([], 5, dtype=dtype)
self.assertEqual(dist.dtype, dtype)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
def testUnknownShape(self):
with self.test_session():
logits = array_ops.placeholder(dtype=dtypes.float32)
dist = categorical.Categorical(logits)
sample = dist.sample()
# Will sample class 1.
sample_value = sample.eval(feed_dict={logits: [-1000.0, 1000.0]})
self.assertEqual(1, sample_value)
# Batch entry 0 will sample class 1, batch entry 1 will sample class 0.
sample_value_batch = sample.eval(
feed_dict={logits: [[-1000.0, 1000.0], [1000.0, -1000.0]]})
self.assertAllEqual([1, 0], sample_value_batch)
def testPMFWithBatch(self):
histograms = [[0.2, 0.8], [0.6, 0.4]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
with self.test_session():
self.assertAllClose(dist.prob([0, 1]).eval(), [0.2, 0.4])
def testPMFNoBatch(self):
histograms = [0.2, 0.8]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
with self.test_session():
self.assertAllClose(dist.prob(0).eval(), 0.2)
def testCDFWithDynamicEventShapeKnownNdims(self):
"""Test that dynamically-sized events with unknown shape work."""
batch_size = 2
histograms = array_ops.placeholder(dtype=dtypes.float32,
shape=(batch_size, None))
event = array_ops.placeholder(dtype=dtypes.float32, shape=(batch_size,))
dist = categorical.Categorical(probs=histograms)
cdf_op = dist.cdf(event)
# Feed values into the placeholder with different shapes
# three classes.
event_feed_one = [0, 1]
histograms_feed_one = [[0.5, 0.3, 0.2], [1.0, 0.0, 0.0]]
expected_cdf_one = [0.0, 1.0]
feed_dict_one = {
histograms: histograms_feed_one,
event: event_feed_one
}
# six classes.
event_feed_two = [2, 5]
histograms_feed_two = [[0.9, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.15, 0.2, 0.05, 0.35, 0.13, 0.12]]
expected_cdf_two = [0.9, 0.88]
feed_dict_two = {
histograms: histograms_feed_two,
event: event_feed_two
}
with self.test_session() as sess:
actual_cdf_one = sess.run(cdf_op, feed_dict=feed_dict_one)
actual_cdf_two = sess.run(cdf_op, feed_dict=feed_dict_two)
self.assertAllClose(actual_cdf_one, expected_cdf_one)
self.assertAllClose(actual_cdf_two, expected_cdf_two)
@parameterized.named_parameters(
("test1", [0, 1], [[0.5, 0.3, 0.2], [1.0, 0.0, 0.0]], [0.0, 1.0]),
("test2", [2, 5], [[0.9, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.15, 0.2, 0.05, 0.35, 0.13, 0.12]], [0.9, 0.88]))
def testCDFWithDynamicEventShapeUnknownNdims(
self, events, histograms, expected_cdf):
"""Test that dynamically-sized events with unknown shape work."""
event_ph = array_ops.placeholder_with_default(events, shape=None)
histograms_ph = array_ops.placeholder_with_default(histograms, shape=None)
dist = categorical.Categorical(probs=histograms_ph)
cdf_op = dist.cdf(event_ph)
actual_cdf = self.evaluate(cdf_op)
self.assertAllClose(actual_cdf, expected_cdf)
def testCDFWithBatch(self):
histograms = [[0.1, 0.2, 0.3, 0.25, 0.15],
[0.0, 0.75, 0.2, 0.05, 0.0]]
event = [0, 3]
expected_cdf = [0.0, 0.95]
dist = categorical.Categorical(probs=histograms)
cdf_op = dist.cdf(event)
with self.test_session():
self.assertAllClose(cdf_op.eval(), expected_cdf)
def testCDFNoBatch(self):
histogram = [0.1, 0.2, 0.3, 0.4]
event = 2
expected_cdf = 0.3
dist = categorical.Categorical(probs=histogram)
cdf_op = dist.cdf(event)
with self.test_session():
self.assertAlmostEqual(cdf_op.eval(), expected_cdf)
def testCDFBroadcasting(self):
# shape: [batch=2, n_bins=3]
histograms = [[0.2, 0.1, 0.7],
[0.3, 0.45, 0.25]]
# shape: [batch=3, batch=2]
devent = [
[0, 0],
[1, 1],
[2, 2]
]
dist = categorical.Categorical(probs=histograms)
# We test that the probabilities are correctly broadcasted over the
# additional leading batch dimension of size 3.
expected_cdf_result = np.zeros((3, 2))
expected_cdf_result[0, 0] = 0
expected_cdf_result[0, 1] = 0
expected_cdf_result[1, 0] = 0.2
expected_cdf_result[1, 1] = 0.3
expected_cdf_result[2, 0] = 0.3
expected_cdf_result[2, 1] = 0.75
with self.test_session():
self.assertAllClose(dist.cdf(devent).eval(), expected_cdf_result)
def testBroadcastWithBatchParamsAndBiggerEvent(self):
## The parameters have a single batch dimension, and the event has two.
# param shape is [3 x 4], where 4 is the number of bins (non-batch dim).
cat_params_py = [
[0.2, 0.15, 0.35, 0.3],
[0.1, 0.05, 0.68, 0.17],
[0.1, 0.05, 0.68, 0.17]
]
# event shape = [5, 3], both are "batch" dimensions.
disc_event_py = [
[0, 1, 2],
[1, 2, 3],
[0, 0, 0],
[1, 1, 1],
[2, 1, 0]
]
# shape is [3]
normal_params_py = [
-10.0,
120.0,
50.0
]
# shape is [5, 3]
real_event_py = [
[-1.0, 0.0, 1.0],
[100.0, 101, -50],
[90, 90, 90],
[-4, -400, 20.0],
[0.0, 0.0, 0.0]
]
cat_params_tf = array_ops.constant(cat_params_py)
disc_event_tf = array_ops.constant(disc_event_py)
cat = categorical.Categorical(probs=cat_params_tf)
normal_params_tf = array_ops.constant(normal_params_py)
real_event_tf = array_ops.constant(real_event_py)
norm = normal.Normal(loc=normal_params_tf, scale=1.0)
# Check that normal and categorical have the same broadcasting behaviour.
to_run = {
"cat_prob": cat.prob(disc_event_tf),
"cat_log_prob": cat.log_prob(disc_event_tf),
"cat_cdf": cat.cdf(disc_event_tf),
"cat_log_cdf": cat.log_cdf(disc_event_tf),
"norm_prob": norm.prob(real_event_tf),
"norm_log_prob": norm.log_prob(real_event_tf),
"norm_cdf": norm.cdf(real_event_tf),
"norm_log_cdf": norm.log_cdf(real_event_tf),
}
with self.test_session() as sess:
run_result = sess.run(to_run)
self.assertAllEqual(run_result["cat_prob"].shape,
run_result["norm_prob"].shape)
self.assertAllEqual(run_result["cat_log_prob"].shape,
run_result["norm_log_prob"].shape)
self.assertAllEqual(run_result["cat_cdf"].shape,
run_result["norm_cdf"].shape)
self.assertAllEqual(run_result["cat_log_cdf"].shape,
run_result["norm_log_cdf"].shape)
def testLogPMF(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = categorical.Categorical(logits)
with self.test_session():
self.assertAllClose(dist.log_prob([0, 1]).eval(), np.log([0.2, 0.4]))
self.assertAllClose(dist.log_prob([0.0, 1.0]).eval(), np.log([0.2, 0.4]))
def testEntropyNoBatch(self):
logits = np.log([0.2, 0.8]) - 50.
dist = categorical.Categorical(logits)
with self.test_session():
self.assertAllClose(dist.entropy().eval(),
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))
def testEntropyWithBatch(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = categorical.Categorical(logits)
with self.test_session():
self.assertAllClose(dist.entropy().eval(), [
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
-(0.6 * np.log(0.6) + 0.4 * np.log(0.4))
])
def testEntropyGradient(self):
with self.test_session() as sess:
logits = constant_op.constant([[1., 2., 3.], [2., 5., 1.]])
probabilities = nn_ops.softmax(logits)
log_probabilities = nn_ops.log_softmax(logits)
true_entropy = - math_ops.reduce_sum(
probabilities * log_probabilities, axis=-1)
categorical_distribution = categorical.Categorical(probs=probabilities)
categorical_entropy = categorical_distribution.entropy()
# works
true_entropy_g = gradients_impl.gradients(true_entropy, [logits])
categorical_entropy_g = gradients_impl.gradients(
categorical_entropy, [logits])
res = sess.run({"true_entropy": true_entropy,
"categorical_entropy": categorical_entropy,
"true_entropy_g": true_entropy_g,
"categorical_entropy_g": categorical_entropy_g})
self.assertAllClose(res["true_entropy"],
res["categorical_entropy"])
self.assertAllClose(res["true_entropy_g"],
res["categorical_entropy_g"])
def testSample(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
n = 10000
samples = dist.sample(n, seed=123)
samples.set_shape([n, 1, 2])
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = samples.eval()
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
self.assertAllClose(
[[0.2, 0.4]], np.mean(
sample_values == 0, axis=0), atol=1e-2)
self.assertAllClose(
[[0.8, 0.6]], np.mean(
sample_values == 1, axis=0), atol=1e-2)
def testSampleWithSampleShape(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
samples = dist.sample((100, 100), seed=123)
prob = dist.prob(samples)
prob_val = prob.eval()
self.assertAllClose(
[0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()], atol=1e-2)
self.assertAllClose(
[0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()], atol=1e-2)
def testNotReparameterized(self):
p = constant_op.constant([0.3, 0.3, 0.4])
with backprop.GradientTape() as tape:
tape.watch(p)
dist = categorical.Categorical(p)
samples = dist.sample(100)
grad_p = tape.gradient(samples, p)
self.assertIsNone(grad_p)
def testLogPMFBroadcasting(self):
with self.test_session():
# 1 x 2 x 2
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
prob = dist.prob(1)
self.assertAllClose([[0.8, 0.6]], prob.eval())
prob = dist.prob([1])
self.assertAllClose([[0.8, 0.6]], prob.eval())
prob = dist.prob([0, 1])
self.assertAllClose([[0.2, 0.6]], prob.eval())
prob = dist.prob([[0, 1]])
self.assertAllClose([[0.2, 0.6]], prob.eval())
prob = dist.prob([[[0, 1]]])
self.assertAllClose([[[0.2, 0.6]]], prob.eval())
prob = dist.prob([[1, 0], [0, 1]])
self.assertAllClose([[0.8, 0.4], [0.2, 0.6]], prob.eval())
prob = dist.prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertAllClose([[[0.8, 0.6], [0.8, 0.4]], [[0.8, 0.4], [0.2, 0.6]]],
prob.eval())
def testLogPMFShape(self):
with self.test_session():
# shape [1, 2, 2]
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms))
log_prob = dist.log_prob([0, 1])
self.assertEqual(2, log_prob.get_shape().ndims)
self.assertAllEqual([1, 2], log_prob.get_shape())
log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertEqual(3, log_prob.get_shape().ndims)
self.assertAllEqual([2, 2, 2], log_prob.get_shape())
def testLogPMFShapeNoBatch(self):
histograms = [0.2, 0.8]
dist = categorical.Categorical(math_ops.log(histograms))
log_prob = dist.log_prob(0)
self.assertEqual(0, log_prob.get_shape().ndims)
self.assertAllEqual([], log_prob.get_shape())
log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertEqual(3, log_prob.get_shape().ndims)
self.assertAllEqual([2, 2, 2], log_prob.get_shape())
def testMode(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.6, 0.4]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
self.assertAllEqual(dist.mode().eval(), [[1, 0]])
def testCategoricalCategoricalKL(self):
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)
with self.test_session() as sess:
for categories in [2, 4]:
for batch_size in [1, 10]:
a_logits = np.random.randn(batch_size, categories)
b_logits = np.random.randn(batch_size, categories)
a = categorical.Categorical(logits=a_logits)
b = categorical.Categorical(logits=b_logits)
kl = kullback_leibler.kl_divergence(a, b)
kl_val = sess.run(kl)
# Make sure KL(a||a) is 0
kl_same = sess.run(kullback_leibler.kl_divergence(a, a))
prob_a = np_softmax(a_logits)
prob_b = np_softmax(b_logits)
kl_expected = np.sum(prob_a * (np.log(prob_a) - np.log(prob_b)),
axis=-1)
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
self.assertAllClose(kl_same, np.zeros_like(kl_expected))
# Run the full test suite via the TensorFlow test runner when executed
# directly as a script.
if __name__ == "__main__":
  test.main()
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Replace value-based unique constraints with key-based ones.

    forwards() drops the (…, 'value', 'key') unique constraints on Option,
    GroupMeta and ProjectOption and creates narrower key-only constraints;
    backwards() restores the originals. The operations in forwards() are
    wrapped in try/except so a failure on one constraint (presumably a
    database whose constraint state already differs) does not abort the
    migration — errors there are deliberately swallowed. The bare
    ``except:`` clauses were narrowed to ``except Exception:`` so that
    SystemExit/KeyboardInterrupt still propagate.
    """

    def forwards(self, orm):
        """Apply the migration: drop value-based uniques, add key-based ones."""
        # Removing unique constraint on 'ProjectOption', fields ['project', 'value', 'key']
        try:
            db.delete_unique('sentry_projectoptions', ['project_id', 'value', 'key'])
        except Exception:
            pass

        # Removing unique constraint on 'GroupMeta', fields ['group', 'value', 'key']
        try:
            db.delete_unique('sentry_groupmeta', ['group_id', 'value', 'key'])
        except Exception:
            pass

        # Removing unique constraint on 'Option', fields ['value', 'key']
        try:
            db.delete_unique('sentry_option', ['value', 'key'])
        except Exception:
            pass

        # Adding unique constraint on 'Option', fields ['key']
        try:
            db.create_unique('sentry_option', ['key'])
        except Exception:
            pass

        # Adding unique constraint on 'GroupMeta', fields ['group', 'key']
        try:
            db.create_unique('sentry_groupmeta', ['group_id', 'key'])
        except Exception:
            pass

        # Adding unique constraint on 'ProjectOption', fields ['project', 'key']
        try:
            db.create_unique('sentry_projectoptions', ['project_id', 'key'])
        except Exception:
            pass

    def backwards(self, orm):
        """Revert the migration; unlike forwards(), failures here propagate."""
        # Removing unique constraint on 'ProjectOption', fields ['project', 'key']
        db.delete_unique('sentry_projectoptions', ['project_id', 'key'])

        # Removing unique constraint on 'GroupMeta', fields ['group', 'key']
        db.delete_unique('sentry_groupmeta', ['group_id', 'key'])

        # Removing unique constraint on 'Option', fields ['key']
        db.delete_unique('sentry_option', ['key'])

        # Adding unique constraint on 'Option', fields ['value', 'key']
        db.create_unique('sentry_option', ['value', 'key'])

        # Adding unique constraint on 'GroupMeta', fields ['group', 'value', 'key']
        db.create_unique('sentry_groupmeta', ['group_id', 'value', 'key'])

        # Adding unique constraint on 'ProjectOption', fields ['project', 'value', 'key']
        db.create_unique('sentry_projectoptions', ['project_id', 'value', 'key'])

    # Frozen ORM state generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sentry.event': {
            'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
        },
        'sentry.filtervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.group': {
            'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'sentry.groupbookmark': {
            'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['auth.User']"})
        },
        'sentry.groupmeta': {
            'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'sentry.messagecountbyminute': {
            'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.messagefiltervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.messageindex': {
            'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
            'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'sentry.option': {
            'Meta': {'object_name': 'Option'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'sentry.project': {
            'Meta': {'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'sentry.projectdomain': {
            'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
        },
        'sentry.projectmember': {
            'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_set'", 'to': "orm['auth.User']"})
        },
        'sentry.projectoption': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'sentry.view': {
            'Meta': {'object_name': 'View'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
        }
    }

    complete_apps = ['sentry']
| |
"""OAuth2Manager class definition.
Copyright 2017 by Doug Mahugh. All Rights Reserved.
Licensed under the MIT License."""
import json
import os
import pprint
import time
import urllib.parse
import uuid
from bottle import redirect, request
import requests
class OAuth2Manager(object): #-----------------------------------------------<<<
"""Handles the details of obtaining an authorization code and access token
from Azure Active Directory, and provides wrappers for making authenticated
calls to the Microsoft Graph API.
"""
def __init__(self, config=None):
"""The config argument is a dictionary of configuration settings. If it
contains a configfile entry, settings are loaded from that file (a JSON
dictionary), and then any other entries in the config dictionary are
applied afterward (and can therefore override settings from the file).
The config object may include any of these keys:
configfile = name of JSON configuration file
app_name = application name (as registered in an Azure AD tenant)
app_id = application id (as registered in an Azure AD tenant)
app_secret = application secret (as registered in an Azure AD tenant)
redirect_url = redirect url (as registered in an Azure AD tenant)
scopes = list of scopes needed (e.g., ['Mail.Send'])
api_base = base URL for the protected resource/API; used for resolving
any relative URLs passed to the get() method
auth_base = base URL for the authorization request
token_url = URL for retrieving an access token via a POST
"""
# initialize properties used for OAuth2 configuration ...
self.app_name = ''
self.app_id = ''
self.app_secret = ''
self.redirect_url = ''
self.scopes = []
self.api_base = ''
self.auth_base = ''
self.token_url = ''
if 'configfile' in config:
# Configuration filename provided, so merge those settings with
# other settings (if any) in the config object. Note that any
# explicit settings in the config object take precedence over
# settings from the configfile.
filesettings = json.loads(open(config['configfile']).read())
for key in filesettings:
if key not in config:
config[key] = filesettings[key]
# store configuration settings from config object
if 'app_name' in config:
self.app_name = config['app_name']
if 'app_id' in config:
self.app_id = config['app_id']
if 'app_secret' in config:
self.app_secret = config['app_secret']
if 'redirect_url' in config:
self.redirect_url = config['redirect_url']
if 'scopes' in config:
self.scopes = config['scopes']
if 'api_base' in config:
self.api_base = config['api_base']
if 'auth_base' in config:
self.auth_base = config['auth_base']
if 'token_url' in config:
self.token_url = config['token_url']
# initialize properties used for caching session state ...
self.auth_url = ''
self.authcode = ''
self.state = ''
self.access_token = None
self.token_type = ''
self.token_expires_at = 0
self.token_scope = ''
self.refresh_token = None
self.loggedin = False
self.loggedin_id = ''
self.loggedin_name = ''
self.loggedin_email = ''
self.loggedin_public = ''
self.loggedin_photo = None
self.cache('read') # read cached state (if any)
# default is redirect to home page after login
self.after_login = '/'
if self.token_seconds() > 5:
print('>>> OAuth2Manager: ' + \
'cached access token is still valid for {0} seconds'. \
format(self.token_seconds()))
else:
print('>>> OAuth2Manager: cached access token has expired')
self.refresh_access_token()
self.cache('save')
def api_endpoint(self, url): #-------------------------------------------<<<
"""Convert a partial/relative endpoint to a full URL."""
if url.split('/')[0].lower() in ['http:', 'https:']:
return url
else:
return urllib.parse.urljoin(self.api_base, url.lstrip('/'))
def authcode_abbrev(self, authcode=None):
"""Return an abbreviated version of an authorization code, for
reporting purposes. Defaults to self.authcode if no authcode passed.
"""
if not authcode:
code = self.authcode
if not code:
return 'None'
return code[:3] + '...' + code[-3:] + ' ({0} bytes)'.format(len(code))
    def authorized(self): #--------------------------------------------------<<<
        """We've been given an authorization code, so use it to request an
        access token.

        Side effects: resets self.state, fills in the loggedin_* properties
        from the Graph /me endpoint, caches the resulting auth state, and
        redirects the browser to self.after_login.
        """
        # verify state received is same as state sent with the HTTPS request,
        # which confirms that we initiated this login attempt
        if self.state != request.query.state:
            raise Exception('>>> SHUTTING DOWN: state mismatch' + \
                '\n\nState SENT: {0}\n\nState RECEIVED: {1}'. \
                format(str(self.state), str(request.query.state)))
        self.state = '' # reset session state to prevent re-use
        token_response = self.fetch_token(request.query.code)
        if not token_response:
            print('>>> OAuth2Manager: request for access token failed')
            # NOTE(review): relies on bottle's redirect() raising to stop
            # execution here — confirm; otherwise the .ok access below would
            # fail when fetch_token returned None.
            redirect('/')
        if not token_response.ok:
            # error - return to user the error text from Azure AD
            return token_response.text
        me_response = self.get('me')
        me_data = me_response.json()
        if 'error' in me_data:
            print('>>> OAuth2Manager: /me endpoint returned an error ... ' + \
                str(me_data))
        # set properties for current user name, email, public name (with domain)
        # NOTE(review): an error payload above is only printed, then these
        # key lookups would raise KeyError — confirm whether that is intended.
        fullname = me_data['displayName']
        email = me_data['userPrincipalName']
        self.loggedin_id = me_data['id']
        self.loggedin_name = fullname
        self.loggedin_email = email
        if '@' in email:
            # public name includes only the first label of the email domain
            self.loggedin_public = '{0} (@{1})'. \
                format(fullname, email.split('@')[1].split('.')[0])
        else:
            self.loggedin_public = '{0} ({1})'.format(fullname, email)
        # save profile photo
        profile_pic = self.get('me/photo/$value', stream=True)
        if profile_pic.ok:
            import base64
            self.loggedin_photo = base64.b64encode(profile_pic.raw.read())
        else:
            self.loggedin_photo = None
        self.cache('save') # update cached auth state
        return redirect(self.after_login)
def cache(self, action): #-----------------------------------------------<<<
    """Manage local cache for auth status.

    Three actions are supported:
    'save'  = save current auth status
    'read'  = restore auth status from cached version
    'clear' = clear the cached auth status (any unknown action also clears)
    """
    cachefile = 'cache.json'
    photofile = 'cache.photo'
    if action == 'save':
        configdata = dict(auth_url=self.auth_url,
                          access_token=self.access_token,
                          token_expires_at=self.token_expires_at,
                          token_scope=self.token_scope,
                          refresh_token=self.refresh_token,
                          loggedin=self.loggedin,
                          loggedin_id=self.loggedin_id,
                          loggedin_name=self.loggedin_name,
                          loggedin_email=self.loggedin_email,
                          loggedin_public=self.loggedin_public)
        # Fix: use context managers so file handles are closed promptly
        # instead of relying on garbage collection.
        with open(cachefile, 'w') as fhandle:
            fhandle.write(json.dumps(configdata))
        if os.path.isfile(photofile):
            os.remove(photofile) # clear any existing cached photo
        if self.loggedin_photo:
            with open(photofile, 'wb') as fhandle:
                fhandle.write(self.loggedin_photo)
    elif action == 'read':
        if os.path.isfile(cachefile):
            with open(cachefile) as fhandle:
                configdata = json.loads(fhandle.read())
            self.auth_url = configdata['auth_url']
            self.access_token = configdata['access_token']
            self.token_expires_at = configdata['token_expires_at']
            self.token_scope = configdata['token_scope']
            self.refresh_token = configdata['refresh_token']
            self.loggedin = configdata['loggedin']
            self.loggedin_id = configdata['loggedin_id']
            self.loggedin_name = configdata['loggedin_name']
            self.loggedin_email = configdata['loggedin_email']
            self.loggedin_public = configdata['loggedin_public']
            if os.path.isfile(photofile):
                with open(photofile, 'rb') as fhandle:
                    self.loggedin_photo = fhandle.read()
    else:
        # note that invalid actions will just clear the cache
        if os.path.isfile(cachefile):
            os.remove(cachefile)
        if os.path.isfile(photofile):
            os.remove(photofile)
        print('>>> OAuth2Manager: local cache cleared')
def default_headers(self): #---------------------------------------------<<<
    """Build the default HTTP headers for Graph API calls, including the
    current access token as a Bearer credential.
    """
    # The unique client-request-id header is round-tripped by the Graph
    # API and can be used to correlate a request with its response for
    # diagnostic purposes.
    headers = {
        'User-Agent': 'bottle-msgraph/1.0',
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'return-client-request-id': 'true',
    }
    headers['Authorization'] = 'Bearer {0}'.format(self.access_token)
    headers['client-request-id'] = str(uuid.uuid4())
    return headers
def fetch_token(self, authcode): #---------------------------------------<<<
    """Exchange an authorization code (returned from auth_url) for an
    OAuth2 access token.

    Returns the token endpoint's response object on success, or None if
    the token could not be saved.
    """
    self.authcode = authcode
    payload = dict(client_id=self.app_id,
                   client_secret=self.app_secret,
                   grant_type='authorization_code',
                   code=authcode,
                   redirect_uri=self.redirect_url)
    response = requests.post(self.token_url, data=payload)
    if not self.save_token(response):
        return None # the request for an access token failed
    return response
def get(self, endpoint, headers=None, stream=False): #-------------------<<<
    """GET from the Graph API (authenticated with the access token).

    headers = extra HTTP headers, merged over the defaults
    stream  = passed through to requests for streamed (binary) bodies
    """
    # refresh token if within 5 seconds of expiring
    if self.token_seconds() < 5:
        self.refresh_access_token()
    request_headers = self.default_headers()
    request_headers.update(headers or {})
    return requests.get(self.api_endpoint(endpoint),
                        headers=request_headers, stream=stream)
def login(self, redirect_to): #------------------------------------------<<<
    """Start an interactive login (authenticate against Azure AD)."""
    # create the "state" GUID, which is round-tripped by the auth_url
    # endpoint so we can verify that we initiated this login
    self.state = str(uuid.uuid4())
    # remember where to go once the login has completed
    self.after_login = redirect_to
    # assemble the auth_url with all required OAuth2 parameters
    base = self.auth_base
    if not base.endswith('/'):
        base += '/'
    params = ['response_type=code',
              'client_id=' + self.app_id,
              'redirect_uri=' + self.redirect_url,
              'scope=' + '%20'.join(self.scopes),
              'state=' + self.state]
    self.auth_url = base + '?' + '&'.join(params)
    print('>>> OAuth2Manager: ask user to authenticate')
    redirect(self.auth_url, 302)
def logout(self, redirect_to='/'): #-------------------------------------<<<
    """Clear the current logged-in state and redirect to the given route.

    Pass redirect_to=None to skip the redirect and only reset the
    logged-in status.
    """
    self.loggedin = False
    self.loggedin_name = ''
    self.loggedin_email = ''
    self.state = None
    self.access_token = None
    self.token_expires_at = 0
    self.cache('clear') # drop the cached auth state as well
    if not redirect_to:
        return
    print('>>> OAuth2Manager: user logout')
    redirect(redirect_to)
def post(self, endpoint, headers=None, data=None, verify=False, params=None):
    """POST to the Graph API (authenticated with the access token).

    headers = custom HTTP headers (merged with defaults, incl. token)
    verify  = the Requests option for verifying the SSL certificate;
              defaults to False for demo purposes. For more information:
              http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification
    """
    # refresh token if within 5 seconds of expiring
    if self.token_seconds() < 5:
        self.refresh_access_token()
    request_headers = self.default_headers()
    request_headers.update(headers or {})
    return requests.post(self.api_endpoint(endpoint),
                         headers=request_headers,
                         data=data, verify=verify, params=params)
def print_settings(self): #----------------------------------------------<<<
    """Dump all current property values to the console, for diagnostics."""
    print('>>> OAuth2Manager properties ...')
    pprint.pprint(vars(self))
def refresh_access_token(self): #----------------------------------------<<<
    """Use the stored refresh token to obtain a new access token."""
    payload = dict(client_id=self.app_id,
                   client_secret=self.app_secret,
                   grant_type='refresh_token',
                   refresh_token=self.refresh_token)
    self.save_token(requests.post(self.token_url, data=payload))
def save_token(self, response): #----------------------------------------<<<
    """Save an access token and related metadata.

    response = the response object returned by the self.token_url
               endpoint.
    Returns True if the token was successfully saved, False if not. (For
    example, the token_url API may have returned no token.)
    """
    jsondata = response.json()
    if 'access_token' not in jsondata:
        self.logout(redirect_to=None) # log out and clear local cache
        # (also fixed 'Oauth2Manager' typo to match every other message)
        print('>>> OAuth2Manager: request for access token failed')
        return False # no access token found
    self.access_token = jsondata['access_token']
    self.loggedin = True # we're authenticated now
    self.token_type = jsondata['token_type']
    if self.token_type != 'Bearer':
        print('>>> OAuth2Manager: expected Bearer token type, but received {0}'. \
            format(self.token_type))
    # Verify that the scopes returned include all scopes requested. The
    # offline_access scope is never returned by Azure AD, so we don't
    # include it in scopes_expected if present.
    scopes_expected = set(scope.lower() for scope in self.scopes
                          if scope.lower() != 'offline_access')
    scopes_returned = set(scope.lower()
                          for scope in jsondata['scope'].split(' '))
    if scopes_expected > scopes_returned:
        # Bug fix: the format string used placeholder {1} with a single
        # argument, which raised IndexError whenever this branch ran.
        print('WARNING: expected scopes not returned = {0}'. \
            format(' '.join(scopes_expected - scopes_returned)))
    self.token_scope = jsondata['scope']
    # token_expires_at = time.time() value (seconds) at which it expires
    self.token_expires_at = time.time() + int(jsondata['expires_in'])
    self.refresh_token = jsondata.get('refresh_token', None)
    print('>>> OAuth2Manager: access token acquired ({0} bytes)'. \
        format(len(self.access_token)))
    return True
def token_abbrev(self, token_val=None, token_type='access'): #-----------<<<
    """Return abbreviated version of an access token for display purposes.

    If a token_val is provided, that value is abbreviated.
    If no token_val is provided, the token_type argument selects the value:
        token_type == 'access' (default) - self.access_token
        token_type == 'refresh'          - self.refresh_token
    """
    # Bug fix: the original never assigned `token` when token_val was
    # provided, raising NameError on the final return.
    if token_val:
        token = token_val
    elif token_type.lower() == 'refresh':
        token = self.refresh_token
    else:
        token = self.access_token
    if not token:
        return 'None'
    return token[:3] + '...' + token[-3:] + ' ({0} bytes)'.format(len(token))
def token_seconds(self): #-----------------------------------------------<<<
    """Return the (rounded) number of seconds before the current access
    token expires, or 0 if it is already expired or there is no valid
    access token.
    """
    remaining = self.token_expires_at - time.time()
    if not self.access_token or remaining <= 0:
        return 0
    return round(remaining)
| |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
    """Initialize a fresh, empty 4-node regtest chain in the temp dir."""
    print("Initializing test directory "+self.options.tmpdir)
    initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
    """Start 4 nodes and connect them into one (unsplit) network."""
    self.nodes = start_nodes(4, self.options.tmpdir)
    connect_nodes_bi(self.nodes,0,1)
    connect_nodes_bi(self.nodes,1,2)
    connect_nodes_bi(self.nodes,0,2)
    connect_nodes_bi(self.nodes,0,3)
    self.is_network_split=False
    self.sync_all()
def run_test(self):
    """Exercise fundrawtransaction end to end: simple funding, explicit
    VINs, fee comparison against wallet-built transactions, multisig,
    encrypted wallets, many-input funding, OP_RETURN and watch-only
    inputs.

    NOTE(review): this file is Python 2 (print statement below), matching
    the #!/usr/bin/env python2 shebang.
    """
    print "Mining blocks..."
    min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
    # This test is not meant to test fee estimation and we'd like
    # to be sure all txs are sent at a consistent desired feerate
    for node in self.nodes:
        node.settxfee(min_relay_tx_fee)
    # if the fee's positive delta is higher than this value tests will fail,
    # neg. delta always fail the tests.
    # The size of the signature of every input may be at most 2 bytes larger
    # than a minimum sized signature.
    # = 2 bytes * minRelayTxFeePerByte
    feeTolerance = 2 * min_relay_tx_fee/1000
    self.nodes[2].generate(1)
    self.sync_all()
    self.nodes[0].generate(121)
    self.sync_all()
    # import a pubkey on node3 so it holds watch-only funds for the
    # watch-only tests at the end
    watchonly_address = self.nodes[0].getnewaddress()
    watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
    watchonly_amount = Decimal(2000)
    self.nodes[3].importpubkey(watchonly_pubkey, "", True)
    watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
    self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
    # seed node2 with coins of known amounts (15, 10, 50) used below
    self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
    self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
    self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
    self.sync_all()
    self.nodes[0].generate(1)
    self.sync_all()

    ###############
    # simple test #
    ###############
    inputs = [ ]
    outputs = { self.nodes[0].getnewaddress() : 10 }
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    assert(len(dec_tx['vin']) > 0) #test if we have enough inputs

    ##############################
    # simple test with two coins #
    ##############################
    inputs = [ ]
    outputs = { self.nodes[0].getnewaddress() : 22 }
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    assert(len(dec_tx['vin']) > 0) #test if we have enough inputs

    ##############################
    # simple test with two coins #
    ##############################
    inputs = [ ]
    outputs = { self.nodes[0].getnewaddress() : 26 }
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    assert(len(dec_tx['vin']) > 0)
    assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')

    ################################
    # simple test with two outputs #
    ################################
    inputs = [ ]
    outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    for out in dec_tx['vout']:
        totalOut += out['value']
    assert(len(dec_tx['vin']) > 0)
    assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')

    #########################################################################
    # test a fundrawtransaction with a VIN greater than the required amount #
    #########################################################################
    utx = False
    listunspent = self.nodes[2].listunspent()
    for aUtx in listunspent:
        if aUtx['amount'] == 50:
            utx = aUtx
            break
    assert(utx!=False)
    inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
    outputs = { self.nodes[0].getnewaddress() : 10 }
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    for out in dec_tx['vout']:
        totalOut += out['value']
    assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee

    #####################################################################
    # test a fundrawtransaction with which will not get a change output #
    #####################################################################
    utx = False
    listunspent = self.nodes[2].listunspent()
    for aUtx in listunspent:
        if aUtx['amount'] == 50:
            utx = aUtx
            break
    assert(utx!=False)
    inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
    outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    for out in dec_tx['vout']:
        totalOut += out['value']
    assert_equal(rawtxfund['changepos'], -1)
    assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee

    #########################################################################
    # test a fundrawtransaction with a VIN smaller than the required amount #
    #########################################################################
    utx = False
    listunspent = self.nodes[2].listunspent()
    for aUtx in listunspent:
        if aUtx['amount'] == 10:
            utx = aUtx
            break
    assert(utx!=False)
    inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
    outputs = { self.nodes[0].getnewaddress() : 10 }
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    # 4-byte version + 1-byte vin count + 36-byte prevout then script_len
    # (splice a dummy 1-byte scriptSig "00" into the raw hex so funding
    # must cope with a non-empty input script)
    rawtx = rawtx[:82] + "0100" + rawtx[84:]
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    matchingOuts = 0
    for i, out in enumerate(dec_tx['vout']):
        totalOut += out['value']
        if out['scriptPubKey']['addresses'][0] in outputs:
            matchingOuts+=1
        else:
            assert_equal(i, rawtxfund['changepos'])
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
    assert_equal(matchingOuts, 1)
    assert_equal(len(dec_tx['vout']), 2)

    ###########################################
    # test a fundrawtransaction with two VINs #
    ###########################################
    utx = False
    utx2 = False
    listunspent = self.nodes[2].listunspent()
    for aUtx in listunspent:
        if aUtx['amount'] == 10:
            utx = aUtx
        if aUtx['amount'] == 50:
            utx2 = aUtx
    # NOTE(review): utx2 is never checked here; if no 50-coin output
    # exists this fails later with a confusing TypeError instead.
    assert(utx!=False)
    inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
    outputs = { self.nodes[0].getnewaddress() : 60 }
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    matchingOuts = 0
    for out in dec_tx['vout']:
        totalOut += out['value']
        if out['scriptPubKey']['addresses'][0] in outputs:
            matchingOuts+=1
    assert_equal(matchingOuts, 1)
    assert_equal(len(dec_tx['vout']), 2)
    matchingIns = 0
    for vinOut in dec_tx['vin']:
        for vinIn in inputs:
            if vinIn['txid'] == vinOut['txid']:
                matchingIns+=1
    assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params

    #########################################################
    # test a fundrawtransaction with two VINs and two vOUTs #
    #########################################################
    utx = False
    utx2 = False
    listunspent = self.nodes[2].listunspent()
    for aUtx in listunspent:
        if aUtx['amount'] == 10:
            utx = aUtx
        if aUtx['amount'] == 50:
            utx2 = aUtx
    assert(utx!=False)
    inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
    outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    matchingOuts = 0
    for out in dec_tx['vout']:
        totalOut += out['value']
        if out['scriptPubKey']['addresses'][0] in outputs:
            matchingOuts+=1
    assert_equal(matchingOuts, 2)
    assert_equal(len(dec_tx['vout']), 3)

    ##############################################
    # test a fundrawtransaction with invalid vin #
    ##############################################
    listunspent = self.nodes[2].listunspent()
    inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
    outputs = { self.nodes[0].getnewaddress() : 10}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    try:
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        raise AssertionError("Spent more than available")
    except JSONRPCException as e:
        assert("Insufficient" in e.error['message'])

    ############################################################
    #compare fee of a standard pubkeyhash transaction
    inputs = []
    outputs = {self.nodes[1].getnewaddress():11}
    rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[0].fundrawtransaction(rawTx)
    #create same transaction over sendtoaddress
    txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
    signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
    #compare fee
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    assert(feeDelta >= 0 and feeDelta <= feeTolerance)
    ############################################################

    ############################################################
    #compare fee of a standard pubkeyhash transaction with multiple outputs
    inputs = []
    outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
    rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[0].fundrawtransaction(rawTx)
    #create same transaction over sendtoaddress
    txId = self.nodes[0].sendmany("", outputs)
    signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
    #compare fee
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    assert(feeDelta >= 0 and feeDelta <= feeTolerance)
    ############################################################

    ############################################################
    #compare fee of a 2of2 multisig p2sh transaction
    # create 2of2 addr
    addr1 = self.nodes[1].getnewaddress()
    addr2 = self.nodes[1].getnewaddress()
    addr1Obj = self.nodes[1].validateaddress(addr1)
    addr2Obj = self.nodes[1].validateaddress(addr2)
    mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
    inputs = []
    outputs = {mSigObj:11}
    rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[0].fundrawtransaction(rawTx)
    #create same transaction over sendtoaddress
    txId = self.nodes[0].sendtoaddress(mSigObj, 11)
    signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
    #compare fee
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    assert(feeDelta >= 0 and feeDelta <= feeTolerance)
    ############################################################

    ############################################################
    #compare fee of a 4of5 multisig p2sh transaction
    # create 4of5 addr
    addr1 = self.nodes[1].getnewaddress()
    addr2 = self.nodes[1].getnewaddress()
    addr3 = self.nodes[1].getnewaddress()
    addr4 = self.nodes[1].getnewaddress()
    addr5 = self.nodes[1].getnewaddress()
    addr1Obj = self.nodes[1].validateaddress(addr1)
    addr2Obj = self.nodes[1].validateaddress(addr2)
    addr3Obj = self.nodes[1].validateaddress(addr3)
    addr4Obj = self.nodes[1].validateaddress(addr4)
    addr5Obj = self.nodes[1].validateaddress(addr5)
    mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
    inputs = []
    outputs = {mSigObj:11}
    rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[0].fundrawtransaction(rawTx)
    #create same transaction over sendtoaddress
    txId = self.nodes[0].sendtoaddress(mSigObj, 11)
    signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
    #compare fee
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    assert(feeDelta >= 0 and feeDelta <= feeTolerance)
    ############################################################

    ############################################################
    # spend a 2of2 multisig transaction over fundraw
    # create 2of2 addr
    addr1 = self.nodes[2].getnewaddress()
    addr2 = self.nodes[2].getnewaddress()
    addr1Obj = self.nodes[2].validateaddress(addr1)
    addr2Obj = self.nodes[2].validateaddress(addr2)
    mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
    # send 12 ULM to msig addr
    txId = self.nodes[0].sendtoaddress(mSigObj, 12)
    self.sync_all()
    self.nodes[1].generate(1)
    self.sync_all()
    oldBalance = self.nodes[1].getbalance()
    inputs = []
    outputs = {self.nodes[1].getnewaddress():11}
    rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[2].fundrawtransaction(rawTx)
    signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
    txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
    self.sync_all()
    self.nodes[1].generate(1)
    self.sync_all()
    # make sure funds are received at node1
    assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())

    ############################################################
    # locked wallet test
    self.nodes[1].encryptwallet("test")
    self.nodes.pop(1)
    stop_nodes(self.nodes)
    wait_bitcoinds()
    self.nodes = start_nodes(4, self.options.tmpdir)
    # This test is not meant to test fee estimation and we'd like
    # to be sure all txs are sent at a consistent desired feerate
    for node in self.nodes:
        node.settxfee(min_relay_tx_fee)
    connect_nodes_bi(self.nodes,0,1)
    connect_nodes_bi(self.nodes,1,2)
    connect_nodes_bi(self.nodes,0,2)
    connect_nodes_bi(self.nodes,0,3)
    self.is_network_split=False
    self.sync_all()
    # sending from the encrypted wallet must fail while it is locked
    try:
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
        raise AssertionError("Wallet unlocked without passphrase")
    except JSONRPCException as e:
        assert('walletpassphrase' in e.error['message'])
    oldBalance = self.nodes[0].getbalance()
    inputs = []
    outputs = {self.nodes[0].getnewaddress():11}
    rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[1].fundrawtransaction(rawTx)
    #now we need to unlock
    self.nodes[1].walletpassphrase("test", 100)
    signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
    txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
    self.sync_all()
    self.nodes[1].generate(1)
    self.sync_all()
    # make sure funds are received at node0
    # (11 received + block rewards, presumably — TODO confirm the 500)
    assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())

    ###############################################
    # multiple (~19) inputs tx test | Compare fee #
    ###############################################
    #empty node1, send some small coins from node0 to node1
    self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
    self.sync_all()
    self.nodes[0].generate(1)
    self.sync_all()
    for i in range(0,20):
        self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
    self.sync_all()
    self.nodes[0].generate(1)
    self.sync_all()
    #fund a tx with ~20 small inputs
    inputs = []
    outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
    rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[1].fundrawtransaction(rawTx)
    #create same transaction over sendtoaddress
    txId = self.nodes[1].sendmany("", outputs)
    signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
    #compare fee
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs

    #############################################
    # multiple (~19) inputs tx test | sign/send #
    #############################################
    #again, empty node1, send some small coins from node0 to node1
    self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
    self.sync_all()
    self.nodes[0].generate(1)
    self.sync_all()
    for i in range(0,20):
        self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
    self.sync_all()
    self.nodes[0].generate(1)
    self.sync_all()
    #fund a tx with ~20 small inputs
    oldBalance = self.nodes[0].getbalance()
    inputs = []
    outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
    rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[1].fundrawtransaction(rawTx)
    fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
    txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
    self.sync_all()
    self.nodes[0].generate(1)
    self.sync_all()
    assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward

    #####################################################
    # test fundrawtransaction with OP_RETURN and no vin #
    #####################################################
    rawtx = "0100000000010000000000000000066a047465737400000000"
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(len(dec_tx['vin']), 0)
    assert_equal(len(dec_tx['vout']), 1)
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
    assert_equal(len(dec_tx['vout']), 2) # one change output added

    ##################################################
    # test a fundrawtransaction using only watchonly #
    ##################################################
    inputs = []
    outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
    result = self.nodes[3].fundrawtransaction(rawtx, True)
    res_dec = self.nodes[0].decoderawtransaction(result["hex"])
    assert_equal(len(res_dec["vin"]), 1)
    assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
    assert("fee" in result.keys())
    assert_greater_than(result["changepos"], -1)

    ###############################################################
    # test fundrawtransaction using the entirety of watched funds #
    ###############################################################
    inputs = []
    outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
    result = self.nodes[3].fundrawtransaction(rawtx, True)
    res_dec = self.nodes[0].decoderawtransaction(result["hex"])
    assert_equal(len(res_dec["vin"]), 2)
    assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
    assert_greater_than(result["fee"], 0)
    assert_greater_than(result["changepos"], -1)
    assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
    # node3 holds only the watch-only pubkey, so it cannot fully sign;
    # node0 (which owns the key) completes the signature
    signedtx = self.nodes[3].signrawtransaction(result["hex"])
    assert(not signedtx["complete"])
    signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
    assert(signedtx["complete"])
    self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
    # Run the fundrawtransaction regression test as a standalone script.
    RawTransactionsTest().main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Collection of useful coders.
Only those coders listed in __all__ are part of the public API of this module.
"""
from __future__ import absolute_import
import base64
import sys
from builtins import object
import google.protobuf.wrappers_pb2
from future.moves import pickle
from apache_beam.coders import coder_impl
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.utils import proto_utils
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from .stream import get_varint_size
except ImportError:
from .slow_stream import get_varint_size
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
# pylint: disable=wrong-import-order, wrong-import-position
# Avoid dependencies on the full SDK.
try:
# Import dill from the pickler module to make sure our monkey-patching of dill
# occurs.
from apache_beam.internal.pickler import dill
except ImportError:
# We fall back to using the stock dill library in tests that don't use the
# full Python SDK.
import dill
# Public API of this module; coders not listed here are internal details.
__all__ = ['Coder',
           'BytesCoder', 'DillCoder', 'FastPrimitivesCoder', 'FloatCoder',
           'IterableCoder', 'PickleCoder', 'ProtoCoder', 'SingletonCoder',
           'StrUtf8Coder', 'TimestampCoder', 'TupleCoder',
           'TupleSequenceCoder', 'VarIntCoder', 'WindowedValueCoder']
def serialize_coder(coder):
    """Serialize a coder to bytes in the form b'<ClassName>$<pickled coder>'."""
    from apache_beam.internal import pickler
    class_name = coder.__class__.__name__.encode('utf-8')
    return b'%s$%s' % (class_name, pickler.dumps(coder))
def deserialize_coder(serialized):
    """Reverse serialize_coder: drop the class-name prefix and unpickle."""
    from apache_beam.internal import pickler
    prefix_and_payload = serialized.split(b'$', 1)
    return pickler.loads(prefix_and_payload[1])
# pylint: enable=wrong-import-order, wrong-import-position
class Coder(object):
"""Base class for coders."""
def encode(self, value):
    """Encodes the given object into a byte string.

    Must be overridden by concrete coders; the base class always raises.
    """
    raise NotImplementedError('Encode not implemented: %s.' % self)
def decode(self, encoded):
    """Decodes the given byte string into the corresponding object.

    Must be overridden by concrete coders; the base class always raises.
    """
    raise NotImplementedError('Decode not implemented: %s.' % self)
def is_deterministic(self):
    """Whether this coder is guaranteed to encode values deterministically.

    A deterministic coder is required for key coders in GroupByKey operations
    to produce consistent results.

    For example, note that the default coder, the PickleCoder, is not
    deterministic: the ordering of pickled entries in maps may vary across
    executions since there is no defined order, and such a coder is not in
    general suitable for usage as a key coder in GroupByKey operations, since
    each instance of the same key may be encoded differently.

    Returns:
      Whether coder is deterministic. The base class conservatively
      returns False; deterministic subclasses override this.
    """
    return False
def as_deterministic_coder(self, step_label, error_message=None):
"""Returns a deterministic version of self, if possible.
Otherwise raises a value error.
"""
if self.is_deterministic():
return self
else:
raise ValueError(error_message or "'%s' cannot be made deterministic.")
def estimate_size(self, value):
"""Estimates the encoded size of the given value, in bytes.
Dataflow estimates the encoded size of a PCollection processed in a pipeline
step by using the estimated size of a random sample of elements in that
PCollection.
The default implementation encodes the given value and returns its byte
size. If a coder can provide a fast estimate of the encoded size of a value
(e.g., if the encoding has a fixed size), it can provide its estimate here
to improve performance.
Arguments:
value: the value whose encoded size is to be estimated.
Returns:
The estimated encoded size of the given value.
"""
return len(self.encode(value))
# ===========================================================================
# Methods below are internal SDK details that don't need to be modified for
# user-defined coders.
# ===========================================================================
def _create_impl(self):
"""Creates a CoderImpl to do the actual encoding and decoding.
"""
return coder_impl.CallbackCoderImpl(self.encode, self.decode,
self.estimate_size)
def get_impl(self):
"""For internal use only; no backwards-compatibility guarantees.
Returns the CoderImpl backing this Coder.
"""
if not hasattr(self, '_impl'):
self._impl = self._create_impl()
assert isinstance(self._impl, coder_impl.CoderImpl)
return self._impl
def __getstate__(self):
return self._dict_without_impl()
def _dict_without_impl(self):
if hasattr(self, '_impl'):
d = dict(self.__dict__)
del d['_impl']
return d
return self.__dict__
@classmethod
def from_type_hint(cls, unused_typehint, unused_registry):
# If not overridden, just construct the coder without arguments.
return cls()
def is_kv_coder(self):
return False
def key_coder(self):
if self.is_kv_coder():
raise NotImplementedError('key_coder: %s' % self)
else:
raise ValueError('Not a KV coder: %s.' % self)
def value_coder(self):
if self.is_kv_coder():
raise NotImplementedError('value_coder: %s' % self)
else:
raise ValueError('Not a KV coder: %s.' % self)
def _get_component_coders(self):
"""For internal use only; no backwards-compatibility guarantees.
Returns the internal component coders of this coder."""
# This is an internal detail of the Coder API and does not need to be
# refined in user-defined Coders.
return []
def as_cloud_object(self, coders_context=None):
"""For internal use only; no backwards-compatibility guarantees.
Returns Google Cloud Dataflow API description of this coder."""
# This is an internal detail of the Coder API and does not need to be
# refined in user-defined Coders.
value = {
# We pass coders in the form "<coder_name>$<pickled_data>" to make the
# job description JSON more readable. Data before the $ is ignored by
# the worker.
'@type':
serialize_coder(self),
'component_encodings': [
component.as_cloud_object(coders_context)
for component in self._get_component_coders()
],
}
if coders_context:
value['pipeline_proto_coder_id'] = coders_context.get_id(self)
return value
def __repr__(self):
return self.__class__.__name__
# pylint: disable=protected-access
def __eq__(self, other):
return (self.__class__ == other.__class__
and self._dict_without_impl() == other._dict_without_impl())
# pylint: enable=protected-access
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash(type(self))
_known_urns = {}
@classmethod
def register_urn(cls, urn, parameter_type, fn=None):
"""Registers a urn with a constructor.
For example, if 'beam:fn:foo' had parameter type FooPayload, one could
write `RunnerApiFn.register_urn('bean:fn:foo', FooPayload, foo_from_proto)`
where foo_from_proto took as arguments a FooPayload and a PipelineContext.
This function can also be used as a decorator rather than passing the
callable in as the final parameter.
A corresponding to_runner_api_parameter method would be expected that
returns the tuple ('beam:fn:foo', FooPayload)
"""
def register(fn):
cls._known_urns[urn] = parameter_type, fn
return staticmethod(fn)
if fn:
# Used as a statement.
register(fn)
else:
# Used as a decorator.
return register
def to_runner_api(self, context):
urn, typed_param, components = self.to_runner_api_parameter(context)
return beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
environment_id=(
context.default_environment_id() if context else None),
spec=beam_runner_api_pb2.FunctionSpec(
urn=urn,
payload=typed_param.SerializeToString()
if typed_param is not None else None)),
component_coder_ids=[context.coders.get_id(c) for c in components])
@classmethod
def from_runner_api(cls, coder_proto, context):
"""Converts from an SdkFunctionSpec to a Fn object.
Prefer registering a urn with its parameter type and constructor.
"""
parameter_type, constructor = cls._known_urns[coder_proto.spec.spec.urn]
return constructor(
proto_utils.parse_Bytes(coder_proto.spec.spec.payload, parameter_type),
[context.coders.get_by_id(c) for c in coder_proto.component_coder_ids],
context)
def to_runner_api_parameter(self, context):
return (
python_urns.PICKLED_CODER,
google.protobuf.wrappers_pb2.BytesValue(value=serialize_coder(self)),
())
@staticmethod
def register_structured_urn(urn, cls):
"""Register a coder that's completely defined by its urn and its
component(s), if any, which are passed to construct the instance.
"""
cls.to_runner_api_parameter = (
lambda self, unused_context: (urn, None, self._get_component_coders()))
# pylint: disable=unused-variable
@Coder.register_urn(urn, None)
def from_runner_api_parameter(unused_payload, components, unused_context):
if components:
return cls(*components)
else:
return cls()
# Module-level registration: makes any pickled Coder round-trippable through
# the runner API under the PICKLED_CODER urn.
@Coder.register_urn(
    python_urns.PICKLED_CODER, google.protobuf.wrappers_pb2.BytesValue)
def _pickle_from_runner_api_parameter(payload, components, context):
  # The payload wraps serialize_coder() output; components/context are unused.
  return deserialize_coder(payload.value)
class StrUtf8Coder(Coder):
  """A coder used for reading and writing strings as UTF-8."""

  def encode(self, value):
    # Text string -> UTF-8 bytes.
    return value.encode('utf-8')

  def decode(self, value):
    # UTF-8 bytes -> text string.
    return value.decode('utf-8')

  def is_deterministic(self):
    # Equal strings always produce identical UTF-8 byte sequences.
    return True
class ToStringCoder(Coder):
  """A default string coder used if no sink coder is specified."""

  # The encode() definition is chosen at class-creation time based on the
  # interpreter's major version.
  if sys.version_info.major == 2:

    def encode(self, value):
      # pylint: disable=unicode-builtin
      # Python 2: explicitly UTF-8 encode unicode; stringify everything else.
      return (value.encode('utf-8') if isinstance(value, unicode)  # noqa: F821
              else str(value))
  else:

    def encode(self, value):
      # Python 3: pass bytes through unchanged; stringify and UTF-8 encode
      # any other value.
      return value if isinstance(value, bytes) else str(value).encode('utf-8')

  def decode(self, _):
    # One-way coder: used only when writing to sinks.
    raise NotImplementedError('ToStringCoder cannot be used for decoding.')

  def is_deterministic(self):
    return True
class FastCoder(Coder):
  """Coder subclass used when a (faster) CoderImpl is supplied directly.

  The Coder class defines _create_impl in terms of encode() and decode();
  this class inverts that by defining encode() and decode() in terms of
  _create_impl().
  """

  def encode(self, value):
    """Encodes the given object into a byte string."""
    return self.get_impl().encode(value)

  def decode(self, encoded):
    """Decodes the given byte string into the corresponding object."""
    return self.get_impl().decode(encoded)

  def estimate_size(self, value):
    return self.get_impl().estimate_size(value)

  def _create_impl(self):
    # Subclasses must supply the concrete CoderImpl.
    raise NotImplementedError
class BytesCoder(FastCoder):
  """Coder for raw byte strings, passed through unchanged."""

  def is_deterministic(self):
    # Identical byte strings always encode identically.
    return True

  def _create_impl(self):
    return coder_impl.BytesCoderImpl()

  def as_cloud_object(self, coders_context=None):
    return {'@type': 'kind:bytes'}

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))


Coder.register_structured_urn(common_urns.coders.BYTES.urn, BytesCoder)
class VarIntCoder(FastCoder):
  """Coder for integers using a variable-length encoding."""

  def is_deterministic(self):
    # Each integer has exactly one varint representation.
    return True

  def _create_impl(self):
    return coder_impl.VarIntCoderImpl()

  def as_cloud_object(self, coders_context=None):
    return {'@type': 'kind:varint'}

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))


Coder.register_structured_urn(common_urns.coders.VARINT.urn, VarIntCoder)
class FloatCoder(FastCoder):
  """A coder used for floating-point values."""

  def is_deterministic(self):
    return True

  def _create_impl(self):
    return coder_impl.FloatCoderImpl()

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))
class TimestampCoder(FastCoder):
  """A coder used for timeutil.Timestamp values."""

  def is_deterministic(self):
    return True

  def _create_impl(self):
    return coder_impl.TimestampCoderImpl()

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))
class _TimerCoder(FastCoder):
  """A coder used for timer values.

  For internal use."""

  def __init__(self, payload_coder):
    # Coder for the timer's user payload.
    self._payload_coder = payload_coder

  def _get_component_coders(self):
    return [self._payload_coder]

  def _create_impl(self):
    return coder_impl.TimerCoderImpl(self._payload_coder.get_impl())

  def is_deterministic(self):
    # Deterministic iff the payload encoding is deterministic.
    return self._payload_coder.is_deterministic()

  def __eq__(self, other):
    return (type(self) == type(other)
            and self._payload_coder == other._payload_coder)

  def __hash__(self):
    return hash(type(self)) + hash(self._payload_coder)


Coder.register_structured_urn(
    common_urns.coders.TIMER.urn, _TimerCoder)
class SingletonCoder(FastCoder):
  """A coder that always encodes exactly one value."""

  def __init__(self, value):
    # The single value this coder can (de)serialize.
    self._value = value

  def _create_impl(self):
    return coder_impl.SingletonCoderImpl(self._value)

  def is_deterministic(self):
    # Only one encodable value exists, so encoding is trivially stable.
    return True

  def __eq__(self, other):
    return type(self) == type(other) and self._value == other._value

  def __hash__(self):
    return hash(self._value)
def maybe_dill_dumps(o):
  """Pickle using cPickle or the Dill pickler as a fallback."""
  # We need to use the dill pickler for objects of certain custom classes,
  # including, for example, ones that contain lambdas.
  try:
    return pickle.dumps(o, pickle.HIGHEST_PROTOCOL)
  except Exception:  # pylint: disable=broad-except
    # Deliberate broad catch: any pickling failure falls back to dill.
    return dill.dumps(o)
def maybe_dill_loads(o):
  """Unpickle using cPickle or the Dill pickler as a fallback."""
  try:
    return pickle.loads(o)
  except Exception:  # pylint: disable=broad-except
    # Deliberate broad catch: mirror maybe_dill_dumps and retry with dill.
    return dill.loads(o)
class _PickleCoderBase(FastCoder):
  """Base class for pickling coders."""

  def is_deterministic(self):
    # Note that the default coder, the PickleCoder, is not deterministic (for
    # example, the ordering of picked entries in maps may vary across
    # executions), and so is not in general suitable for usage as a key coder in
    # GroupByKey operations.
    return False

  def as_cloud_object(self, coders_context=None, is_pair_like=True):
    value = super(_PickleCoderBase, self).as_cloud_object(coders_context)
    # We currently use this coder in places where we cannot infer the coder to
    # use for the value type in a more granular way. In places where the
    # service expects a pair, it checks for the "is_pair_like" key, in which
    # case we would fail without the hack below.
    if is_pair_like:
      value['is_pair_like'] = True
      # The recursive calls pass is_pair_like=False so the nested component
      # encodings do not themselves recurse.
      value['component_encodings'] = [
          self.as_cloud_object(coders_context, is_pair_like=False),
          self.as_cloud_object(coders_context, is_pair_like=False)
      ]
    return value

  # We allow .key_coder() and .value_coder() to be called on PickleCoder since
  # we can't always infer the return values of lambdas in ParDo operations, the
  # result of which may be used in a GroupBykey.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))
class PickleCoder(_PickleCoderBase):
  """Coder using Python's pickle functionality."""

  def _create_impl(self):
    # Bind to locals so the encode lambda does not capture this coder
    # instance, keeping the closure small and lookups fast.
    dumps = pickle.dumps
    protocol = pickle.HIGHEST_PROTOCOL
    return coder_impl.CallbackCoderImpl(
        lambda x: dumps(x, protocol), pickle.loads)

  def as_deterministic_coder(self, step_label, error_message=None):
    # Fall back to the deterministic-checking wrapper instead of failing.
    return DeterministicFastPrimitivesCoder(self, step_label)
class DillCoder(_PickleCoderBase):
  """Coder using dill's pickle functionality."""

  def _create_impl(self):
    # dill can serialize objects (e.g. those holding lambdas) that the
    # stock pickler cannot; the maybe_* helpers try pickle first.
    return coder_impl.CallbackCoderImpl(maybe_dill_dumps, maybe_dill_loads)
class DeterministicFastPrimitivesCoder(FastCoder):
  """Throws runtime errors when encoding non-deterministic values."""

  def __init__(self, coder, step_label):
    self._underlying_coder = coder
    self._step_label = step_label

  def _create_impl(self):
    impl = self._underlying_coder.get_impl()
    return coder_impl.DeterministicFastPrimitivesCoderImpl(
        impl, self._step_label)

  def is_deterministic(self):
    # Determinism is enforced at encode time by the impl.
    return True

  # Treated as pair-capable: KV structure can't always be inferred, so the
  # key and value coders fall back to this coder itself.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self
class FastPrimitivesCoder(FastCoder):
  """Encodes simple primitives (e.g. str, int) efficiently.

  For unknown types, falls back to another coder (e.g. PickleCoder).
  """

  def __init__(self, fallback_coder=PickleCoder()):
    # NOTE(review): the default is a single PickleCoder instance shared by
    # all FastPrimitivesCoder() calls (evaluated once at def time). Coders
    # here carry no mutable state, so this looks intentional — confirm.
    self._fallback_coder = fallback_coder

  def _create_impl(self):
    return coder_impl.FastPrimitivesCoderImpl(
        self._fallback_coder.get_impl())

  def is_deterministic(self):
    # Deterministic only if the fallback used for unknown types is.
    return self._fallback_coder.is_deterministic()

  def as_deterministic_coder(self, step_label, error_message=None):
    if self.is_deterministic():
      return self
    else:
      return DeterministicFastPrimitivesCoder(self, step_label)

  def as_cloud_object(self, coders_context=None, is_pair_like=True):
    # NOTE(review): super(FastCoder, self) starts the MRO *after* FastCoder,
    # dispatching to Coder.as_cloud_object. FastCoder does not override
    # as_cloud_object, so this is equivalent to the conventional
    # super(FastPrimitivesCoder, self) — confirm the skip is intended.
    value = super(FastCoder, self).as_cloud_object(coders_context)
    # We currently use this coder in places where we cannot infer the coder to
    # use for the value type in a more granular way. In places where the
    # service expects a pair, it checks for the "is_pair_like" key, in which
    # case we would fail without the hack below.
    if is_pair_like:
      value['is_pair_like'] = True
      value['component_encodings'] = [
          self.as_cloud_object(coders_context, is_pair_like=False),
          self.as_cloud_object(coders_context, is_pair_like=False)
      ]
    return value

  # We allow .key_coder() and .value_coder() to be called on FastPrimitivesCoder
  # since we can't always infer the return values of lambdas in ParDo
  # operations, the result of which may be used in a GroupBykey.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self

  def __eq__(self, other):
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))
class Base64PickleCoder(Coder):
  """Coder of objects by Python pickle, then base64 encoding."""
  # TODO(robertwb): Do base64 encoding where it's needed (e.g. in json) rather
  # than via a special Coder.

  def encode(self, value):
    pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    return base64.b64encode(pickled)

  def decode(self, encoded):
    raw = base64.b64decode(encoded)
    return pickle.loads(raw)

  def is_deterministic(self):
    # Pickle output is not stable (e.g. map entry ordering); see the
    # corresponding comments for PickleCoder above.
    return False

  # We allow .key_coder() and .value_coder() to be called on Base64PickleCoder
  # since we can't always infer the return values of lambdas in ParDo
  # operations, the result of which may be used in a GroupBykey.
  #
  # TODO(ccy): this is currently only used for KV values from Create transforms.
  # Investigate a way to unify this with PickleCoder.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self
class ProtoCoder(FastCoder):
  """A Coder for Google Protocol Buffers.

  It supports both Protocol Buffers syntax versions 2 and 3. However,
  the runtime version of the python protobuf library must exactly match the
  version of the protoc compiler what was used to generate the protobuf
  messages.

  ProtoCoder is registered in the global CoderRegistry as the default coder for
  any protobuf Message object.
  """

  def __init__(self, proto_message_type):
    # The generated protobuf Message class to (de)serialize.
    self.proto_message_type = proto_message_type

  def _create_impl(self):
    return coder_impl.ProtoCoderImpl(self.proto_message_type)

  def is_deterministic(self):
    # TODO(vikasrk): A proto message can be deterministic if it does not contain
    # a Map.
    return False

  def __eq__(self, other):
    return (type(self) == type(other)
            and self.proto_message_type == other.proto_message_type)

  def __hash__(self):
    return hash(self.proto_message_type)

  @staticmethod
  def from_type_hint(typehint, unused_registry):
    if issubclass(typehint, google.protobuf.message.Message):
      return ProtoCoder(typehint)
    else:
      raise ValueError(('Expected a subclass of google.protobuf.message.Message'
                        ', but got a %s' % typehint))
class TupleCoder(FastCoder):
  """Coder of tuple objects.

  Each position of the tuple is encoded with its own component coder; a
  2-tuple is additionally treated as a key-value pair.
  """

  def __init__(self, components):
    self._coders = tuple(components)

  def _create_impl(self):
    return coder_impl.TupleCoderImpl([c.get_impl() for c in self._coders])

  def is_deterministic(self):
    # Deterministic only if every component coder is.
    return all(c.is_deterministic() for c in self._coders)

  def as_deterministic_coder(self, step_label, error_message=None):
    if self.is_deterministic():
      return self
    else:
      return TupleCoder([c.as_deterministic_coder(step_label, error_message)
                         for c in self._coders])

  @staticmethod
  def from_type_hint(typehint, registry):
    return TupleCoder([registry.get_coder(t) for t in typehint.tuple_types])

  def as_cloud_object(self, coders_context=None):
    if self.is_kv_coder():
      return {
          '@type':
              'kind:pair',
          'is_pair_like':
              True,
          'component_encodings': [
              component.as_cloud_object(coders_context)
              for component in self._get_component_coders()
          ],
      }
    return super(TupleCoder, self).as_cloud_object(coders_context)

  def _get_component_coders(self):
    return self.coders()

  def coders(self):
    return self._coders

  def is_kv_coder(self):
    # A 2-tuple is treated as a key-value pair.
    return len(self._coders) == 2

  def key_coder(self):
    if len(self._coders) != 2:
      raise ValueError('TupleCoder does not have exactly 2 components.')
    return self._coders[0]

  def value_coder(self):
    if len(self._coders) != 2:
      raise ValueError('TupleCoder does not have exactly 2 components.')
    return self._coders[1]

  def __repr__(self):
    return 'TupleCoder[%s]' % ', '.join(str(c) for c in self._coders)

  def __eq__(self, other):
    # BUG FIX: previously compared self._coders with itself, so any two
    # TupleCoders of the same type compared equal regardless of components.
    return (type(self) == type(other)
            and self._coders == other._coders)

  def __hash__(self):
    return hash(self._coders)

  def to_runner_api_parameter(self, context):
    if self.is_kv_coder():
      return common_urns.coders.KV.urn, None, self.coders()
    else:
      return super(TupleCoder, self).to_runner_api_parameter(context)

  @Coder.register_urn(common_urns.coders.KV.urn, None)
  def from_runner_api_parameter(unused_payload, components, unused_context):
    return TupleCoder(components)
class TupleSequenceCoder(FastCoder):
  """Coder of homogeneous tuple objects.

  All elements are encoded with the single element coder.
  """

  def __init__(self, elem_coder):
    self._elem_coder = elem_coder

  def _create_impl(self):
    return coder_impl.TupleSequenceCoderImpl(self._elem_coder.get_impl())

  def is_deterministic(self):
    return self._elem_coder.is_deterministic()

  def as_deterministic_coder(self, step_label, error_message=None):
    if self.is_deterministic():
      return self
    else:
      return TupleSequenceCoder(
          self._elem_coder.as_deterministic_coder(step_label, error_message))

  @staticmethod
  def from_type_hint(typehint, registry):
    return TupleSequenceCoder(registry.get_coder(typehint.inner_type))

  def _get_component_coders(self):
    return (self._elem_coder,)

  def __repr__(self):
    return 'TupleSequenceCoder[%r]' % self._elem_coder

  def __eq__(self, other):
    # BUG FIX: previously compared self._elem_coder with itself, so any two
    # TupleSequenceCoders compared equal regardless of their element coders.
    return (type(self) == type(other)
            and self._elem_coder == other._elem_coder)

  def __hash__(self):
    return hash((type(self), self._elem_coder))
class IterableCoder(FastCoder):
  """Coder of iterables of homogeneous objects.

  All elements are encoded with the single element coder.
  """

  def __init__(self, elem_coder):
    self._elem_coder = elem_coder

  def _create_impl(self):
    return coder_impl.IterableCoderImpl(self._elem_coder.get_impl())

  def is_deterministic(self):
    return self._elem_coder.is_deterministic()

  def as_deterministic_coder(self, step_label, error_message=None):
    if self.is_deterministic():
      return self
    else:
      return IterableCoder(
          self._elem_coder.as_deterministic_coder(step_label, error_message))

  def as_cloud_object(self, coders_context=None):
    return {
        '@type':
            'kind:stream',
        'is_stream_like':
            True,
        'component_encodings': [
            self._elem_coder.as_cloud_object(coders_context)
        ],
    }

  def value_coder(self):
    return self._elem_coder

  @staticmethod
  def from_type_hint(typehint, registry):
    return IterableCoder(registry.get_coder(typehint.inner_type))

  def _get_component_coders(self):
    return (self._elem_coder,)

  def __repr__(self):
    return 'IterableCoder[%r]' % self._elem_coder

  def __eq__(self, other):
    # BUG FIX: previously compared self._elem_coder with itself, so any two
    # IterableCoders compared equal regardless of their element coders.
    return (type(self) == type(other)
            and self._elem_coder == other._elem_coder)

  def __hash__(self):
    return hash((type(self), self._elem_coder))


Coder.register_structured_urn(common_urns.coders.ITERABLE.urn, IterableCoder)
class GlobalWindowCoder(SingletonCoder):
  """Coder for global windows."""

  def __init__(self):
    # Imported locally, presumably to avoid a circular import with
    # apache_beam.transforms — confirm before moving to module level.
    from apache_beam.transforms import window
    super(GlobalWindowCoder, self).__init__(window.GlobalWindow())

  def as_cloud_object(self, coders_context=None):
    return {
        '@type': 'kind:global_window',
    }


Coder.register_structured_urn(
    common_urns.coders.GLOBAL_WINDOW.urn, GlobalWindowCoder)
class IntervalWindowCoder(FastCoder):
  """Coder for a window defined by a start timestamp and a duration."""

  def is_deterministic(self):
    return True

  def _create_impl(self):
    return coder_impl.IntervalWindowCoderImpl()

  def as_cloud_object(self, coders_context=None):
    return {'@type': 'kind:interval_window'}

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))


Coder.register_structured_urn(
    common_urns.coders.INTERVAL_WINDOW.urn, IntervalWindowCoder)
class WindowedValueCoder(FastCoder):
  """Coder for windowed values.

  Encodes the wrapped value together with its timestamp and windows.
  """

  def __init__(self, wrapped_value_coder, window_coder=None):
    if not window_coder:
      # Windows of unknown type fall back to pickling.
      window_coder = PickleCoder()
    self.wrapped_value_coder = wrapped_value_coder
    self.timestamp_coder = TimestampCoder()
    self.window_coder = window_coder

  def _create_impl(self):
    return coder_impl.WindowedValueCoderImpl(
        self.wrapped_value_coder.get_impl(),
        self.timestamp_coder.get_impl(),
        self.window_coder.get_impl())

  def is_deterministic(self):
    # Deterministic only if value, timestamp and window coders all are.
    return all(c.is_deterministic() for c in [self.wrapped_value_coder,
                                              self.timestamp_coder,
                                              self.window_coder])

  def as_cloud_object(self, coders_context=None):
    return {
        '@type':
            'kind:windowed_value',
        'is_wrapper':
            True,
        'component_encodings': [
            component.as_cloud_object(coders_context)
            for component in self._get_component_coders()
        ],
    }

  def _get_component_coders(self):
    # The timestamp coder is fixed and therefore not a listed component.
    return [self.wrapped_value_coder, self.window_coder]

  # KV-ness is delegated to the wrapped value coder.
  def is_kv_coder(self):
    return self.wrapped_value_coder.is_kv_coder()

  def key_coder(self):
    return self.wrapped_value_coder.key_coder()

  def value_coder(self):
    return self.wrapped_value_coder.value_coder()

  def __repr__(self):
    return 'WindowedValueCoder[%s]' % self.wrapped_value_coder

  def __eq__(self, other):
    return (type(self) == type(other)
            and self.wrapped_value_coder == other.wrapped_value_coder
            and self.timestamp_coder == other.timestamp_coder
            and self.window_coder == other.window_coder)

  def __hash__(self):
    return hash(
        (self.wrapped_value_coder, self.timestamp_coder, self.window_coder))


Coder.register_structured_urn(
    common_urns.coders.WINDOWED_VALUE.urn, WindowedValueCoder)
class LengthPrefixCoder(FastCoder):
  """For internal use only; no backwards-compatibility guarantees.

  Coder which prefixes the length of the encoded object in the stream."""

  def __init__(self, value_coder):
    self._value_coder = value_coder

  def _create_impl(self):
    return coder_impl.LengthPrefixCoderImpl(self._value_coder.get_impl())

  def is_deterministic(self):
    return self._value_coder.is_deterministic()

  def estimate_size(self, value):
    # Total size = varint-encoded length header + the payload itself.
    value_size = self._value_coder.estimate_size(value)
    return get_varint_size(value_size) + value_size

  def value_coder(self):
    return self._value_coder

  def as_cloud_object(self, coders_context=None):
    return {
        '@type':
            'kind:length_prefix',
        'component_encodings': [
            self._value_coder.as_cloud_object(coders_context)
        ],
    }

  def _get_component_coders(self):
    return (self._value_coder,)

  def __repr__(self):
    return 'LengthPrefixCoder[%r]' % self._value_coder

  def __eq__(self, other):
    return (type(self) == type(other)
            and self._value_coder == other._value_coder)

  def __hash__(self):
    return hash((type(self), self._value_coder))


Coder.register_structured_urn(
    common_urns.coders.LENGTH_PREFIX.urn, LengthPrefixCoder)
| |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A subclass of the ipaddr library that includes comments for ipaddr objects."""
__author__ = 'watson@google.com (Tony Watson)'
from third_party import ipaddr
def IP(ipaddress, comment='', token=''):
  """Take an ip string and return an object of the correct type.

  Args:
    ipaddress: the ip address string.
    comment: optional comment field.
    token: optional token name where this address was extracted from.

  Returns:
    IPv4 or IPv6 object, or raises ValueError.

  Raises:
    ValueError: if the string passed isn't either a v4 or a v6 address.

  Notes:
    this is sort of a poor-mans factory method.
  """
  a = ipaddr.IPNetwork(ipaddress)
  if a.version == 4:
    return IPv4(ipaddress, comment, token)
  elif a.version == 6:
    return IPv6(ipaddress, comment, token)
  # Previously fell off the end and silently returned None for any other
  # version; raise explicitly to honor the documented contract.
  raise ValueError('%r is not a valid IPv4 or IPv6 address' % ipaddress)
class IPv4(ipaddr.IPv4Network):
  """This subclass allows us to keep text comments related to each object."""

  def __init__(self, ip_string, comment='', token=''):
    ipaddr.IPv4Network.__init__(self, ip_string)
    # Free-form comment describing this network.
    self.text = comment
    # Name of the naming-definition token this address came from.
    self.token = token
    self.parent_token = token

  def AddComment(self, comment=''):
    """Append comment to self.text, comma separated.

    Don't add the comment if it's the same as self.text.

    Args: comment
    """
    if self.text:
      if comment and comment not in self.text:
        self.text += ', ' + comment
    else:
      self.text = comment

  def supernet(self, prefixlen_diff=1):
    """Override ipaddr.IPv4 supernet so we can maintain comments.

    See ipaddr.IPv4.Supernet for complete documentation.
    """
    if self.prefixlen == 0:
      return self
    if self.prefixlen - prefixlen_diff < 0:
      raise PrefixlenDiffInvalidError(
          'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (
              self.prefixlen, prefixlen_diff))
    # Re-wrap the plain ipaddr result so comment and token survive.
    ret_addr = IPv4(ipaddr.IPv4Network.supernet(self, prefixlen_diff),
                    comment=self.text, token=self.token)
    return ret_addr

  # Backwards compatibility name from v1.
  Supernet = supernet
class IPv6(ipaddr.IPv6Network):
  """This subclass allows us to keep text comments related to each object."""

  def __init__(self, ip_string, comment='', token=''):
    ipaddr.IPv6Network.__init__(self, ip_string)
    # Free-form comment describing this network.
    self.text = comment
    # Name of the naming-definition token this address came from.
    self.token = token
    self.parent_token = token

  def supernet(self, prefixlen_diff=1):
    """Override ipaddr.IPv6Network supernet so we can maintain comments.

    See ipaddr.IPv6Network.Supernet for complete documentation.
    """
    if self.prefixlen == 0:
      return self
    if self.prefixlen - prefixlen_diff < 0:
      raise PrefixlenDiffInvalidError(
          'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (
              self.prefixlen, prefixlen_diff))
    # Re-wrap the plain ipaddr result so comment and token survive.
    ret_addr = IPv6(ipaddr.IPv6Network.supernet(self, prefixlen_diff),
                    comment=self.text, token=self.token)
    return ret_addr

  # Backwards compatibility name from v1.
  Supernet = supernet

  def AddComment(self, comment=''):
    """Append comment to self.text, comma separated.

    Don't add the comment if it's the same as self.text.

    Args: comment
    """
    if self.text:
      if comment and comment not in self.text:
        self.text += ', ' + comment
    else:
      self.text = comment
def CollapseAddrListRecursive(addresses):
  """Recursively loops through the addresses, collapsing concurrent netblocks.

  Example:

    ip1 = ipaddr.IPv4Network('1.1.0.0/24')
    ip2 = ipaddr.IPv4Network('1.1.1.0/24')
    ip3 = ipaddr.IPv4Network('1.1.2.0/24')
    ip4 = ipaddr.IPv4Network('1.1.3.0/24')
    ip5 = ipaddr.IPv4Network('1.1.4.0/24')
    ip6 = ipaddr.IPv4Network('1.1.0.1/22')

    CollapseAddrRecursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
    [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]

  Note, this shouldn't be called directly, but is called via
  CollapseAddr([])

  Args:
    addresses: List of IPv4 or IPv6 objects

  Returns:
    List of IPv4 or IPv6 objects (depending on what we were passed)
  """
  # Assumes the input is sorted (callers sort via _get_networks_key), so
  # only the last accepted block needs to be compared with the current one.
  ret_array = []
  optimized = False
  for cur_addr in addresses:
    if not ret_array:
      ret_array.append(cur_addr)
      continue
    if ret_array[-1].Contains(cur_addr):
      # save the comment from the subsumed address
      ret_array[-1].AddComment(cur_addr.text)
      optimized = True
    elif cur_addr == ret_array[-1].Supernet().Subnet()[1]:
      # cur_addr is the upper half of the previous block's supernet, so the
      # two merge into that supernet (Contains/Supernet/Subnet: ipaddr v1 API).
      ret_array.append(ret_array.pop().Supernet())
      # save the text from the subsumed address
      ret_array[-1].AddComment(cur_addr.text)
      optimized = True
    else:
      ret_array.append(cur_addr)
  if optimized:
    # A merge may enable further merges with neighbors; repeat until a full
    # pass makes no changes.
    return CollapseAddrListRecursive(ret_array)
  return ret_array
def CollapseAddrList(addresses):
  """Collapse an array of IP objects.

  Example: CollapseAddr(
    [IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) -> [IPv4('1.1.0.0/23')]

  Note: this works just as well with IPv6 addresses too.

  Args:
    addresses: list of ipaddr.IPNetwork objects

  Returns:
    list of ipaddr.IPNetwork objects
  """
  # Sorting first is required: the recursive collapse only compares
  # neighboring entries.
  return CollapseAddrListRecursive(
      sorted(addresses, key=ipaddr._BaseNet._get_networks_key))
def SortAddrList(addresses):
  """Return a sorted list of nacaddr objects."""
  # Uses ipaddr's internal canonical network ordering key (same key as
  # CollapseAddrList).
  return sorted(addresses, key=ipaddr._BaseNet._get_networks_key)
def RemoveAddressFromList(superset, exclude):
  """Remove a single address from a list of addresses.

  Args:
    superset: a List of nacaddr IPv4 or IPv6 addresses
    exclude: a single nacaddr IPv4 or IPv6 address

  Returns:
    a List of nacaddr IPv4 or IPv6 addresses
  """
  ret_array = []
  for addr in superset:
    if exclude == addr or addr in exclude:
      # exclude fully covers addr, so addr is dropped entirely.
      # this is a bug in ipaddr v1. IP('1.1.1.1').AddressExclude(IP('1.1.1.1'))
      # raises an error. Not tested in v2 yet.
      pass
    elif exclude.version == addr.version and exclude in addr:
      # exclude is strictly inside addr: split addr and keep the remainder.
      ret_array.extend([IP(x) for x in addr.AddressExclude(exclude)])
    else:
      # No overlap; keep addr untouched.
      ret_array.append(addr)
  return ret_array
def AddressListExclude(superset, excludes):
  """Remove a list of addresses from another list of addresses.

  Args:
    superset: a List of nacaddr IPv4 or IPv6 addresses
    excludes: a List nacaddr IPv4 or IPv6 addresses

  Returns:
    a List of nacaddr IPv4 or IPv6 addresses
  """
  # Collapse both lists first so each exclusion is applied to a minimal,
  # non-overlapping set of netblocks.
  superset = CollapseAddrList(superset)
  excludes = CollapseAddrList(excludes)
  # Cleanup: removed an unused local ('ret_array = []') that was never read.
  for ex in excludes:
    superset = RemoveAddressFromList(superset, ex)
  return CollapseAddrList(superset)


# Backwards-compatible alias.
ExcludeAddrs = AddressListExclude
class PrefixlenDiffInvalidError(ipaddr.NetmaskValueError):
  """Holdover from ipaddr v1."""
  # Raised by IPv4.supernet/IPv6.supernet when prefixlen_diff would make the
  # prefix length negative.
# This module is a library; direct execution is a no-op.
if __name__ == '__main__':
  pass
| |
"""Module for parsing and representing calendar dates in gedcom format.
"""
__all__ = [
"CalendarType", "CalendarDate", "FrenchDate", "GregorianDate",
"HebrewDate", "JulianDate", "CalendarDateVisitor",
]
import abc
import enum
import re
import convertdate.french_republican
import convertdate.gregorian
import convertdate.hebrew
import convertdate.julian
# GEDCOM month-name abbreviations, indexed by (month number - 1), for the
# Gregorian/Julian, Hebrew, and French Republican calendars respectively.
MONTHS_GREG = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',
               'SEP', 'OCT', 'NOV', 'DEC']
MONTHS_HEBR = ['TSH', 'CSH', 'KSL', 'TVT', 'SHV', 'ADR', 'ADS', 'NSN',
               'IYR', 'SVN', 'TMZ', 'AAV', 'ELL']
MONTHS_FREN = ['VEND', 'BRUM', 'FRIM', 'NIVO', 'PLUV', 'VENT', 'GERM',
               'FLOR', 'PRAI', 'MESS', 'THER', 'FRUC', 'COMP']
# DATE := [<DATE_CALENDAR_ESCAPE> | <NULL>] <DATE_CALENDAR>
# <DATE_CALENDAR> := [<YEAR> | <MONTH> <YEAR> | <DAY> <MONTH> <YEAR>]
# <YEAR can be specified as "1000B.C." or "1699/00"
# <MONTH> is all characters.
# This does not use named groups, it may appear few times in other expressions
# Groups: 1: calendar; 2: day; 3: month; 4: year
# Note: this definition is also used in date.py
DATE = r"""
(?:@\#D([\w ]+)@\s+)? # @#DCALENDAR@, optional (group=1)
(?:
(?:(\d+)\s+)? # day (int), optional (group=2)
([A-Z]{3,4})\s+ # month, name 3-4 chars (group=3)
)?
(?:
(\d+)(?:/(\d+))? # year, required, number with optional /NUMBER
# (group=4,5)
(\s*?B\.C\.)? # optional B.C. suffix (group=6)
)
"""
DATE_RE = re.compile("^" + DATE + "$", re.X | re.I)
@enum.unique
class CalendarType(enum.Enum):
    """Enumeration of the calendar systems supported by this module.

    GEDCOM's ``ROMAN`` calendar (a reserved placeholder in the standard)
    and ``UNKNOWN`` calendar are deliberately not represented here, as the
    library does not support them.

    Each concrete `CalendarDate` implementation stores exactly one of
    these members in its `CalendarDate.calendar` attribute, so the member
    uniquely identifies the concrete date class of an instance.
    """

    GREGORIAN = "GREGORIAN"
    """Value of the `GregorianDate.calendar` attribute."""

    JULIAN = "JULIAN"
    """Value of the `JulianDate.calendar` attribute."""

    HEBREW = "HEBREW"
    """Value of the `HebrewDate.calendar` attribute."""

    FRENCH_R = "FRENCH R"
    """Value of the `FrenchDate.calendar` attribute."""
class CalendarDate(metaclass=abc.ABCMeta):
    """Interface for calendar date representation.

    Parameters
    ----------
    year : `int`
        Calendar year number. If ``bc`` parameter is ``True`` then this year
        is before "epoch" of that calendar.
    month : `str`
        Name of the month. Optional, but if day is given then month cannot
        be None.
    day : `int`
        Day in a month, optional.
    bc : `bool`
        ``True`` if year has "B.C."
    original : `str`
        Original string representation of this date as it was specified in
        GEDCOM file, could be ``None``.

    Notes
    -----
    This class defines attributes and methods that are common for all
    calendars defined in GEDCOM (though the meaning and representation can
    be different in different calendars). In GEDCOM date consists of year,
    month, and day; day and month are optional (either day or day+month),
    year must be present. Day is a number, month is month name in a given
    calendar. Year is a number optionally followed by ``B.C.`` or
    ``/NUMBER`` (latter is defined for Gregorian calendar only).

    Implementation for different calendars are provided by subclasses which
    can implement additional attributes or methods. All subclasses need to
    implement `key()` method to support ordering of the dates from
    different calendars. There are presently four implementations defined
    in this module:

    - `GregorianDate` for "GREGORIAN" calendar
    - `JulianDate` for "JULIAN" calendar
    - `HebrewDate` for "HEBREW" calendar
    - `FrenchDate` for "FRENCH R" calendar

    To implement type-specific code on client side one can use one of these
    approaches:

    - dispatch based on the value of `calendar` attribute, it has
      one of the values defined in `CalendarType` enum,
      the value maps uniquely to an implementation class;
    - dispatch based on the type of the instance using ``isinstance``
      method to check the type (e.g. ``isinstance(date, GregorianDate)``);
    - double dispatch (visitor pattern) by implementing
      `CalendarDateVisitor` interface.
    """

    def __init__(self, year, month=None, day=None, bc=False, original=None):
        self.year = year
        """Calendar year number (`int`)"""
        self.month = None if month is None else month.upper()
        """Month name or ``None`` (`str`)"""
        self.day = day
        """Day number or ``None`` (`int`)"""
        self.bc = bc
        """Flag which is ``True`` if year has a "B.C" suffix (`bool`)."""
        self.original = original
        """Original string representation of this date as it was specified in
        GEDCOM file, could be ``None`` (`str`).
        """
        self.month_num = None
        """Integer month number (1-based) or ``None`` if month name is not
        given or unknown (`int`).
        """
        # determine month number; an unknown month name leaves month_num None
        months = self.months()
        try:
            self.month_num = months.index(self.month) + 1
        except ValueError:
            pass

    @classmethod
    @abc.abstractmethod
    def months(cls):
        """Ordered list of month names (in GEDCOM format) defined in calendar.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def key(self):
        """Return ordering key for this instance.

        Returned key is a tuple with two numbers (jd, flag). ``jd`` is the
        Julian Day number as floating point, ``flag`` is an integer flag.
        If month or day is not known then last month or last day should be
        returned in its place (in corresponding calendar, and converted to
        JD) and ``flag`` should be set to 1. If date and month are known then
        flag should be set to 0.
        """
        raise NotImplementedError()

    @property
    def year_str(self):
        """Calendar year in string representation, this can include dual year
        and/or B.C. suffix (`str`)
        """
        year = str(self.year)
        if self.bc:
            year += " B.C."
        return year

    @property
    @abc.abstractmethod
    def calendar(self):
        """Calendar used for this date, one of the `CalendarType` enums
        (`CalendarType`)
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def accept(self, visitor):
        """Implementation of visitor pattern.

        Each concrete sub-class will implement this method by dispatching the
        call to corresponding visitor method.

        Parameters
        ----------
        visitor : `CalendarDateVisitor`
            Visitor instance.

        Returns
        -------
        value : `object`
            Value returned from a visitor method.
        """
        raise NotImplementedError()

    @classmethod
    def parse(cls, datestr):
        """Parse ``<DATE>`` string and make `CalendarDate` from it.

        Parameters
        ----------
        datestr : `str`
            String with GEDCOM date.

        Returns
        -------
        date : `CalendarDate`
            Date instance.

        Raises
        ------
        ValueError
            Raised if parsing fails.
        """
        def _dual_year(year_str, dual_year_str):
            """Guess dual year, returns actual year number.

            In GEDCOM dual year uses last two digits of the year number
            (though some implementations use four digits). This method
            tries to guess actual year number from the digits that were
            given, e.g. "1650/51" -> 1651; "1699/00" -> 1700.
            """
            if dual_year_str is None:
                return None
            if len(dual_year_str) >= len(year_str):
                return int(dual_year_str)
            # borrow the leading digits of the primary year
            dual_year_str = year_str[:len(year_str)-len(dual_year_str)] + dual_year_str
            year = int(year_str)
            dual_year = int(dual_year_str)
            # dual year always follows the primary year
            while dual_year < year:
                dual_year += 100
            return dual_year

        m = DATE_RE.match(datestr)
        if m is None:
            raise ValueError("Failed to parse date: " + datestr)
        calendar_name = m.group(1) or "GREGORIAN"
        try:
            calendar = CalendarType(calendar_name)
        except ValueError:
            # suppress the uninteresting enum-lookup traceback
            raise ValueError("Unknown calendar: " + datestr) from None
        day = None if m.group(2) is None else int(m.group(2))
        month = m.group(3)
        year = int(m.group(4))
        dual_year = _dual_year(m.group(4), m.group(5))
        bc = m.group(6) is not None
        if dual_year is not None and calendar != CalendarType.GREGORIAN:
            raise ValueError("Cannot use dual year (YYYY/YY) in non-Gregorian calendar: " + datestr)
        if calendar == CalendarType.GREGORIAN:
            return GregorianDate(year, month, day, bc=bc, original=datestr, dual_year=dual_year)
        elif calendar == CalendarType.JULIAN:
            return JulianDate(year, month, day, bc=bc, original=datestr)
        elif calendar == CalendarType.FRENCH_R:
            return FrenchDate(year, month, day, bc=bc, original=datestr)
        elif calendar == CalendarType.HEBREW:
            return HebrewDate(year, month, day, bc=bc, original=datestr)
        else:
            # defensive: all CalendarType members are handled above
            raise ValueError("Unknown calendar: " + datestr)

    # Rich comparisons return NotImplemented for non-CalendarDate operands
    # (previously they raised AttributeError on ``other.key()``).

    def __lt__(self, other):
        if not isinstance(other, CalendarDate):
            return NotImplemented
        return self.key() < other.key()

    def __le__(self, other):
        if not isinstance(other, CalendarDate):
            return NotImplemented
        return self.key() <= other.key()

    def __eq__(self, other):
        if not isinstance(other, CalendarDate):
            return NotImplemented
        return self.key() == other.key()

    def __ne__(self, other):
        if not isinstance(other, CalendarDate):
            return NotImplemented
        return self.key() != other.key()

    def __gt__(self, other):
        if not isinstance(other, CalendarDate):
            return NotImplemented
        return self.key() > other.key()

    def __ge__(self, other):
        if not isinstance(other, CalendarDate):
            return NotImplemented
        return self.key() >= other.key()

    def __hash__(self):
        return hash(self.key())

    def __str__(self):
        """Make printable representation out of this instance.
        """
        val = [self.day, self.month, self.year_str]
        if self.calendar != CalendarType.GREGORIAN:
            # non-default calendars carry an explicit @#DCALENDAR@ escape
            val = ["@#D{}@".format(self.calendar.value)] + val
        return " ".join([str(item) for item in val if item is not None])

    def __repr__(self):
        return str(self)
class GregorianDate(CalendarDate):
    """Implementation of `CalendarDate` for Gregorian calendar.

    Parameter ``dual_year`` (and corresponding attribute) is used for dual
    year. Other parameters have the same meaning as in `CalendarDate`
    class.

    Parameters
    ----------
    dual_year : `int`, optional
        Dual year number or ``None``. Actual year should be given, not just
        two last digits.

    Notes
    -----
    In GEDCOM Gregorian calendar dates are allowed to specify year in the
    form YEAR1/YEAR2 (a.k.a.) dual-dating. Second number is used to specify
    year as if calendar year starts in January, while the first number is
    used for actual calendar year which starts at different date. Note that
    GEDCOM specifies that dual year uses just two last digits in the dual
    year number, though some implementations use 4 digits. This class expects
    actual year number (e.g. as if it was specified as "1699/1700").
    """

    def __init__(self, year, month=None, day=None, bc=False, original=None,
                 dual_year=None):
        CalendarDate.__init__(self, year, month, day, bc, original)
        self.dual_year = dual_year
        """If not ``None`` then this number represent year in a calendar with
        year starting on January 1st (`int`).
        """

    @classmethod
    def months(cls):
        """Ordered list of month names (in GEDCOM format) defined in calendar.
        """
        return MONTHS_GREG

    @property
    def calendar(self):
        # docstring inherited from base class
        return CalendarType.GREGORIAN

    def key(self):
        """Return ordering key for this instance.
        """
        calendar = convertdate.gregorian
        # In dual dating use second year
        year = self.dual_year if self.dual_year is not None else self.year
        if self.bc:
            year = - year
        day = self.day
        offset = 0.
        if self.month_num is None:
            # Take Jan 1 as next year
            year += 1
            month = 1
            day = 1
            offset = 1.
        elif self.day is None:
            # month known, day missing: take 1st of next month (offset 1)
            month = self.month_num + 1
            if month == 13:
                month -= 12
                year += 1
            day = 1
            offset = 1.
        else:
            month = self.month_num
        # try progressively coarser dates in case the exact one is invalid
        dates = [
            (year, month, day, offset),
            (year, month + 1, 1, 1.),
            (year + 1, 1, 1, 1.),
        ]
        for year, month, day, offset in dates:
            try:
                jd = calendar.to_jd(year, month, day) - offset
                break
            except ValueError:
                # Likely a non-existing date, use another
                pass
        else:
            # nothing works, use arbitrary date in the future
            jd = 2816787.5
        flag = 1 if self.day is None or self.month_num is None else 0
        return jd, flag

    @property
    def year_str(self):
        """Calendar year in string representation, this can include dual year
        and/or B.C. suffix (`str`)
        """
        year = str(self.year)
        if self.dual_year is not None:
            # dual year is printed with its last two digits only
            year += "/" + str(self.dual_year)[-2:]
        if self.bc:
            year += " B.C."
        return year

    def __str__(self):
        """Make printable representation out of this instance.

        Unlike the base class this never prepends a calendar escape,
        as Gregorian is the default calendar.
        """
        val = [self.day, self.month, self.year_str]
        return " ".join([str(item) for item in val if item is not None])

    def accept(self, visitor):
        # dispatch to the Gregorian-specific visitor method
        return visitor.visitGregorian(self)
class JulianDate(CalendarDate):
    """Implementation of `CalendarDate` for Julian calendar.

    All parameters have the same meaning as in `CalendarDate` class.
    """

    def __init__(self, year, month=None, day=None, bc=False, original=None):
        CalendarDate.__init__(self, year, month, day, bc, original)

    @classmethod
    def months(cls):
        """Ordered list of month names (in GEDCOM format) defined in calendar.
        """
        return MONTHS_GREG

    def key(self):
        """Return ordering key for this instance.
        """
        calendar = convertdate.julian
        year = - self.year if self.bc else self.year
        day = self.day
        offset = 0.
        if self.month_num is None:
            # Take Jan 1 as next year
            year += 1
            month = 1
            day = 1
            offset = 1.
        elif self.day is None:
            # month known, day missing: take 1st of next month (offset 1)
            month = self.month_num + 1
            if month == 13:
                month -= 12
                year += 1
            day = 1
            offset = 1.
        else:
            month = self.month_num
        # try progressively coarser dates in case the exact one is invalid
        dates = [
            (year, month, day, offset),
            (year, month + 1, 1, 1.),
            (year + 1, 1, 1, 1.),
        ]
        for year, month, day, offset in dates:
            try:
                jd = calendar.to_jd(year, month, day) - offset
                break
            except ValueError:
                # Likely a non-existing date, use another
                pass
        else:
            # nothing works, use arbitrary date in the future
            jd = 2816787.5
        flag = 1 if self.day is None or self.month_num is None else 0
        return jd, flag

    @property
    def calendar(self):
        # docstring inherited from base class
        return CalendarType.JULIAN

    def accept(self, visitor):
        # dispatch to the Julian-specific visitor method
        return visitor.visitJulian(self)
class HebrewDate(CalendarDate):
    """Implementation of `CalendarDate` for Hebrew calendar.

    All parameters have the same meaning as in `CalendarDate` class.
    """

    def __init__(self, year, month=None, day=None, bc=False, original=None):
        CalendarDate.__init__(self, year, month, day, bc, original)

    @classmethod
    def months(cls):
        """Ordered list of month names (in GEDCOM format) defined in calendar.
        """
        return MONTHS_HEBR

    def key(self):
        """Return ordering key for this instance.
        """
        calendar = convertdate.hebrew
        year = - self.year if self.bc else self.year
        # missing month/day default to the last month/day of that year
        month = self.month_num or calendar.year_months(year)
        day = self.day if self.day is not None else calendar.month_days(year, month)
        # try progressively coarser dates in case the exact one is invalid
        dates = [
            (year, month, day, 0.),
            (year, month + 1, 1, 1.),
            (year + 1, 1, 1, 1.),
        ]
        for year, month, day, offset in dates:
            try:
                jd = calendar.to_jd(year, month, day) - offset
                break
            except ValueError:
                # Likely a non-existing date, use another
                pass
        else:
            # nothing works, use arbitrary date in the future
            jd = 2816787.5
        flag = 1 if self.day is None or self.month_num is None else 0
        return jd, flag

    @property
    def calendar(self):
        # docstring inherited from base class
        return CalendarType.HEBREW

    def accept(self, visitor):
        # dispatch to the Hebrew-specific visitor method
        return visitor.visitHebrew(self)
class FrenchDate(CalendarDate):
    """Implementation of `CalendarDate` for French Republican calendar.

    All parameters have the same meaning as in `CalendarDate` class.
    """

    def __init__(self, year, month=None, day=None, bc=False, original=None):
        CalendarDate.__init__(self, year, month, day, bc, original)

    @classmethod
    def months(cls):
        """Ordered list of month names (in GEDCOM format) defined in calendar.
        """
        return MONTHS_FREN

    def key(self):
        """Return ordering key for this instance.
        """
        calendar = convertdate.french_republican
        year = - self.year if self.bc else self.year
        # missing month defaults to the 13th entry (complementary days)
        month = self.month_num or 13
        day = self.day
        if day is None:
            if month == 13:
                # very short "month"
                day = 5
            else:
                day = 30
        # try progressively coarser dates in case the exact one is invalid
        dates = [
            (year, month, day, 0.),
            (year, month + 1, 1, 1.),
            (year + 1, 1, 1, 1.),
        ]
        for year, month, day, offset in dates:
            try:
                jd = calendar.to_jd(year, month, day) - offset
                break
            except ValueError:
                # Likely a non-existing date, use another
                pass
        else:
            # nothing works, use arbitrary date in the future
            jd = 2816787.5
        flag = 1 if self.day is None or self.month_num is None else 0
        return jd, flag

    @property
    def calendar(self):
        # docstring inherited from base class
        return CalendarType.FRENCH_R

    def accept(self, visitor):
        # dispatch to the French-specific visitor method
        return visitor.visitFrench(self)
class CalendarDateVisitor(metaclass=abc.ABCMeta):
    """Visitor interface for the `CalendarDate` class hierarchy.

    This lets clients add calendar-specific behavior without modifying the
    date classes themselves: subclass this interface, implement the four
    ``visit*`` methods, and pass an instance to `CalendarDate.accept()`,
    which dispatches to the method matching the concrete date type, e.g.::

        class FormatterVisitor(CalendarDateVisitor):
            def visitGregorian(self, date):
                return "Gregorian date:" + str(date)
            # and so on for each date type

        visitor = FormatterVisitor()
        date = CalendarDate.parse(date_string)
        formatted = date.accept(visitor)
    """

    @abc.abstractmethod
    def visitGregorian(self, date):
        """Visit an instance of `GregorianDate` type.

        Parameters
        ----------
        date : `GregorianDate`
            Date instance.

        Returns
        -------
        value : `object`
            Anything the implementation chooses; the value is passed back
            through `CalendarDate.accept()`.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def visitJulian(self, date):
        """Visit an instance of `JulianDate` type.

        Parameters
        ----------
        date : `JulianDate`
            Date instance.

        Returns
        -------
        value : `object`
            Anything the implementation chooses; the value is passed back
            through `CalendarDate.accept()`.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def visitHebrew(self, date):
        """Visit an instance of `HebrewDate` type.

        Parameters
        ----------
        date : `HebrewDate`
            Date instance.

        Returns
        -------
        value : `object`
            Anything the implementation chooses; the value is passed back
            through `CalendarDate.accept()`.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def visitFrench(self, date):
        """Visit an instance of `FrenchDate` type.

        Parameters
        ----------
        date : `FrenchDate`
            Date instance.

        Returns
        -------
        value : `object`
            Anything the implementation chooses; the value is passed back
            through `CalendarDate.accept()`.
        """
        raise NotImplementedError()
| |
# -*- coding: utf-8
import yaml
import json
import re
import collections
from etcd import EtcdKeyNotFound
from .specs import render_app_spec, AppType
from lain_sdk.yaml.parser import ProcType, LainConf
from commons.settings import PRIVATE_REGISTRY
from .utils import (
normalize_meta_version,
search_images_from_registry,
get_meta_from_registry,
read_from_etcd,
delete_from_etcd,
set_value_to_etcd,
get_meta_version_from_tag,
get_current_time,
get_domains,
)
from commons.miscs import (
InvalidMetaVersion,
InvalidLainYaml,
InvalidStoreData,
DoesNotExist,
)
from log import logger
# Lifecycle states of an app as stored in etcd: freshly reposited,
# mid-deploy, and deployed (see BaseApp.create/set_deploying/set_deployed).
APP_RUNNING_STATE = {
    'REPO': 'reposited',
    'DEPLOYING': 'deploying',
    'DEPLOY': 'deployed'
}
class BaseApp:
    """An app persisted as a JSON blob in etcd under ``ETCD_PREFIX/<appname>``.

    The plain class attributes below are the persisted fields (and their
    defaults); attributes with a leading underscore are per-process caches
    that are never written to etcd.
    """

    ETCD_PREFIX = '/lain/fake'

    # fields persisted to etcd (see save())
    appname = ''
    giturl = ''
    meta_version = ''
    meta = ''
    default_image = ''
    running_state = ''
    app_type = ''
    last_error = ''
    last_update = ''
    # property saved in memory but not in etcd
    # NOTE: these defaults live on the class and are shared until an
    # instance assigns its own value; all code paths reassign (never mutate
    # in place), so the shared list default is safe.
    _registry_tags = []
    _latest_meta_version = ''
    _app_spec = None
    _lain_config = None

    @classmethod
    def etcd_app_key(cls, appname):
        """Return the etcd key under which *appname* is stored."""
        assert appname
        return "%s/%s" % (cls.ETCD_PREFIX, appname)

    @classmethod
    def render_app_from_etcd_value(cls, etcd_value):
        """Deserialize a JSON etcd value into a new app instance.

        Raises InvalidStoreData when the value is not valid JSON or the
        stored appname is empty.
        """
        try:
            app_info = json.loads(etcd_value)
            appname = app_info.get('appname', '')
            giturl = app_info.get('giturl', '')
            meta_version = app_info.get('meta_version', '')
            meta = app_info.get('meta', '')
            default_image = app_info.get('default_image', '')
            running_state = app_info.get('running_state', None)
            app_type = app_info.get('app_type', None)
            last_update = app_info.get('last_update', '')
            last_error = app_info.get('last_error', '')
            if appname == '':
                raise InvalidStoreData("appname should not be empty")
            app = cls()
            app.appname = appname
            app.giturl = giturl
            app.meta_version = meta_version
            app.meta = meta
            app.default_image = default_image
            app.running_state = running_state
            app.app_type = app_type
            app.last_update = last_update
            app.last_error = last_error
            # reset per-process caches for the fresh instance
            app._registry_tags = []
            app._latest_meta_version = ''
            app._app_spec = None
            app._lain_config = None
            return app
        except ValueError as e:
            # "except ... as ..." keeps this valid on Python 3 as well
            raise InvalidStoreData(e)

    @classmethod
    def get_or_none(cls, appname):
        """Like get() but return None instead of raising DoesNotExist."""
        app = None
        try:
            app = cls.get(appname)
        except DoesNotExist:
            pass
        return app

    @classmethod
    def get(cls, appname):
        """Load *appname* from etcd; raise DoesNotExist if the key is absent."""
        try:
            etcd_r = read_from_etcd(cls.etcd_app_key(appname))
            if etcd_r.dir:
                raise InvalidStoreData("Store Data should not be dir")
        except EtcdKeyNotFound as e:
            raise DoesNotExist(e)
        return cls.render_app_from_etcd_value(etcd_r.value)  # pylint: disable=E1103

    @classmethod
    def all(cls):
        """Return every app stored under ETCD_PREFIX (best effort)."""
        try:
            apps_root_r = read_from_etcd(cls.ETCD_PREFIX)
        except EtcdKeyNotFound as e:
            logger.warn("call App.all() fail: %s" % e)
            return []
        apps = []
        for l in apps_root_r.leaves:
            appname = l.key[len(cls.ETCD_PREFIX) + 1:]  # FIXME: ugly
            try:
                app = cls.get(appname)
                apps.append(app)
            except Exception:
                # best effort: skip broken entries rather than fail all()
                logger.error("error getting app %s from etcd" % appname)
        return apps

    @classmethod
    def create(cls, appname):
        """Create and persist a new app in the 'reposited' state."""
        app = cls()
        app.appname = appname
        app.running_state = APP_RUNNING_STATE['REPO']
        app.save()
        return app

    def clear(self):
        """Reset deploy-related fields to defaults and persist the change."""
        self.giturl = ''
        self.meta_version = ''
        self.meta = ''
        self.app_type = ''
        self.last_error = ''
        self.running_state = APP_RUNNING_STATE['REPO']
        self.save()

    def delete(self):
        """Remove this app's key from etcd."""
        delete_from_etcd(self.etcd_key)

    def save(self):
        """Serialize the persisted fields to JSON and write them to etcd."""
        self.last_update = get_current_time()
        etcd_value = json.dumps({
            'appname': self.appname,
            'giturl': self.giturl,
            'meta_version': self.meta_version,
            'meta': self.meta,
            'default_image': self.default_image,
            'running_state': self.running_state,
            'app_type': self.app_type,
            'last_update': self.last_update,
            'last_error': self.last_error,
        })
        set_value_to_etcd(self.etcd_key, etcd_value)

    @property
    def lain_config(self):
        """Parsed LainConf for this app (cached; None when meta is missing)."""
        if self._lain_config is None:
            self._lain_config = self._get_lain_config()
        return self._lain_config

    def _get_lain_config(self):
        # a config cannot be built without meta; callers treat None as absent
        if self.meta == '' or self.meta_version == '':
            return None
        config = LainConf()
        config.load(self.meta, self.meta_version, self.default_image,
                    registry=PRIVATE_REGISTRY, domains=get_domains())
        return config

    @property
    def app_spec(self):
        """App spec rendered from lain_config (cached)."""
        if self._app_spec is None:
            self._app_spec = render_app_spec(self.lain_config)
        return self._app_spec

    def podgroup_spec(self, name):
        """Return the PodGroup spec named *name*, or None."""
        for pg in self.app_spec.PodGroups:
            if pg.Name == name:
                return pg
        return None

    def portal_spec(self, name):
        """Return the Portal spec named *name*, or None."""
        for p in self.app_spec.Portals:
            if p.Name == name:
                return p
        return None

    @property
    def latest_meta_version(self):
        """Newest meta version known to the registry (cached), or None."""
        if len(self._latest_meta_version) == 0:
            versions = self.availabe_meta_versions()
            if len(versions) == 0:
                return None
            else:
                self._latest_meta_version = versions[0]
        return self._latest_meta_version

    def availabe_meta_versions(self):
        """List meta versions found in registry tags, newest first.

        (The misspelled name is kept for backward compatibility.)
        """
        logger.debug("try to get available meta version of app %s" %
                     self.appname)
        tags = self.registry_tags
        versions = {}
        for k in tags:
            meta_version = get_meta_version_from_tag(k)
            if meta_version:
                # meta versions start with a numeric timestamp; sort by it
                _timestamp = float(meta_version.split('-')[0])
                versions[_timestamp] = meta_version
        ordered_versions = collections.OrderedDict(
            sorted(versions.items(), reverse=True))
        logger.debug(
            "finish getting available meta version of app %s" % self.appname)
        # list() keeps the result indexable on Python 3 too
        return list(ordered_versions.values())

    @property
    def registry_tags(self):
        """Docker registry tags for this app (cached)."""
        if len(self._registry_tags) == 0:
            self._registry_tags = self.docker_image_tags()
        return self._registry_tags

    def docker_image_tags(self):
        """Fetch the tag list for this app from the registry."""
        images = search_images_from_registry(
            app=self._get_registry_search_name())
        return images.get('tags', [])

    def _get_registry_search_name(self):
        # dotted names (e.g. "resource.foo") are searched by the second part
        return self.appname if self.appname.find('.') < 0 else self.appname.split('.')[1]

    @property
    def etcd_key(self):
        """etcd key of this instance."""
        return self.etcd_app_key(self.appname)

    def get_app_type(self):
        """Return app_type, computing and persisting it first if unset."""
        if not self.app_type:
            self.update_app_type()
        return self.app_type

    def update_app_type(self):
        """Recompute app_type from meta and proc types, then persist it."""
        self.app_type = self._load_apptype_from_meta()
        if self.appname.startswith('resource.'):
            self.app_type = AppType.ResourceInstance
        else:
            # a normal app that exposes any portal proc is a service
            for proc in self.lain_config.procs.values():
                if proc.type == ProcType.portal and self.app_type == AppType.Normal:
                    self.app_type = AppType.Service
        self.save()

    def update_git_url(self, giturl, force=False):
        """Bind *giturl* when unset (or when *force*); return whether bound."""
        if force or self.giturl == '':
            self.giturl = giturl
            return True
        return False

    def _load_apptype_from_meta(self):
        # best effort: any problem reading/parsing meta yields 'unknown'
        try:
            if self.meta == '' or self.meta_version == '':
                return 'unknown'
            y = yaml.safe_load(self.meta)
            return y.get('apptype', AppType.Normal)
        except Exception:
            return 'unknown'

    def clear_last_error(self):
        """Clear a recorded error message, persisting only when it changes."""
        if self.last_error != '':
            self.last_error = ''
            self.save()

    def update_last_error(self, err_msg):
        """Record an error message and persist it."""
        self.last_error = err_msg
        self.save()

    def is_reachable(self):
        """True once the app has left the plain 'reposited' state."""
        return self.running_state != APP_RUNNING_STATE['REPO']

    def set_deploying(self):
        """Mark the app as mid-deploy and persist."""
        self.running_state = APP_RUNNING_STATE['DEPLOYING']
        self.save()

    def set_deployed(self):
        """Mark the app as deployed and persist."""
        self.running_state = APP_RUNNING_STATE['DEPLOY']
        self.save()

    def fetch_meta(self, meta_version):
        """Fetch the meta dict for *meta_version* from the registry."""
        return get_meta_from_registry(self.appname, meta_version)

    def base_update_meta(self, meta_version, force=False):
        """Fetch and store meta for *meta_version*.

        Returns a status string, or None when the registry did not return
        a dict. Raises InvalidMetaVersion / InvalidLainYaml on bad input.
        """
        try:
            meta_version = normalize_meta_version(meta_version)
        except Exception as e:
            raise InvalidMetaVersion(e)
        if meta_version == self.meta_version and not force:
            return 'meta_version is already latest'
        meta = self.fetch_meta(meta_version)
        if not isinstance(meta, dict):
            return None
        self.check_giturl(meta, update=True)
        meta['giturl'] = self.giturl
        self.meta = yaml.safe_dump(meta, default_style='"')
        self.meta_version = meta_version
        if self.appname != meta['appname']:
            raise InvalidLainYaml("appname dont match: %s" % meta)
        self.save()
        return 'meta updated'

    def update_meta(self, meta_version, meta=None, force=False,
                    update_lain_config=True, update_spec=True):
        """Update meta (directly from *meta*, or from the registry by
        *meta_version*) and optionally refresh the cached config/spec.
        """
        if meta is not None:
            logger.debug("meta of app `%s` was specified to `%s`" %
                         (self.appname, meta))
            self.meta = meta
            self.save()
            result = "meta specified"
        else:
            logger.debug("try to update meta of app `%s` to meta version `%s`" % (
                self.appname, meta_version))
            result = self.base_update_meta(meta_version, force)
        # BUGFIX: refresh the underlying cache attributes; lain_config and
        # app_spec are read-only properties and must not be assigned to.
        if update_lain_config:
            self._lain_config = self._get_lain_config()
        if update_spec:
            self._app_spec = render_app_spec(self.lain_config)
        logger.debug("finish updating meta of app `%s`" % self.appname)
        return result

    def check_giturl(self, meta, update=False):
        """Validate the giturl in *meta* against the bound giturl.

        With update=True the url is bound first when none was set yet.
        Raises InvalidLainYaml on mismatch or when no url is bound.
        """
        giturl = meta.get('giturl', '')
        giturl = re.sub(r'\.git$', '', giturl)  # raw string: match ".git" suffix
        if giturl != '' and not giturl.startswith("http"):
            giturl = "http://" + giturl
        if update and self.update_git_url(giturl):
            return
        if self.giturl == '':
            raise InvalidLainYaml("No Giturl bound")
        if giturl != self.giturl:
            raise InvalidLainYaml(
                "app giturl '%s' doesn't match with bound url '%s'" % (giturl, self.giturl))

    def check_meta_giturl(self, meta_version):
        """Check the giturl of *meta_version*'s meta against ours."""
        meta = self.fetch_meta(meta_version)
        self.check_giturl(meta)

    def check_latest_giturl(self):
        """Check the giturl of the latest meta version against ours."""
        latest_version = self.latest_meta_version
        self.check_meta_giturl(latest_version)

    def check_latest_version(self):
        """Return (True, version) when a released latest version exists,
        otherwise (False, None).
        """
        logger.debug("check latest version of app %s" % self.appname)
        latest_version = self.latest_meta_version
        if latest_version:
            release_version = "%s-%s" % ("release", latest_version)
            if release_version in self.registry_tags:
                return True, latest_version
        return False, None

    def __unicode__(self):
        return '<%s:%s>' % (self.appname, self.meta_version)
| |
# Copyright (c) 2015-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import hmac
import json
import unittest
import uuid
from swift.common import storage_policy, constraints
from swift.common.middleware import copy
from swift.common.middleware import crypto
from swift.common.middleware.crypto import keymaster
from swift.common.middleware.crypto.crypto_utils import (
load_crypto_meta, Crypto)
from swift.common.ring import Ring
from swift.common.swob import Request, str_to_wsgi
from swift.obj import diskfile
from test.debug_logger import debug_logger
from test.unit import skip_if_no_xattrs
from test.unit.common.middleware.crypto.crypto_helpers import (
md5hex, encrypt, TEST_KEYMASTER_CONF)
from test.unit.helpers import setup_servers, teardown_servers
class TestCryptoPipelineChanges(unittest.TestCase):
# Tests the consequences of crypto middleware being in/out of the pipeline
# or having encryption disabled for PUT/GET requests on same object. Uses
# real backend servers so that the handling of headers and sysmeta is
# verified to diskfile and back.
_test_context = None
@classmethod
def setUpClass(cls):
    # Start the in-process backend servers once for the whole test class;
    # the first entry in "test_servers" is the proxy app used by tests.
    cls._test_context = setup_servers()
    cls.proxy_app = cls._test_context["test_servers"][0]
@classmethod
def tearDownClass(cls):
    # Tear down the servers started in setUpClass; the None check makes
    # this a no-op if no context was ever set up.
    if cls._test_context is not None:
        teardown_servers(cls._test_context)
        cls._test_context = None
def setUp(self):
    skip_if_no_xattrs()
    # Fixed plaintext body and its md5 etag, used by all tests to verify
    # round-trips through the crypto pipeline.
    self.plaintext = b'unencrypted body content'
    self.plaintext_etag = md5hex(self.plaintext)
    self._setup_crypto_app()
def _setup_crypto_app(self, disable_encryption=False, root_secret_id=None):
    """Build the crypto pipeline: keymaster -> encryption -> proxy app.

    ``disable_encryption`` is passed into the crypto filter conf;
    ``root_secret_id``, when given, becomes ``active_root_secret_id`` in
    the keymaster conf. The resulting apps are stored on ``self`` as
    ``encryption``, ``km`` and ``crypto_app``.
    """
    # Set up a pipeline of crypto middleware ending in the proxy app so
    # that tests can make requests to either the proxy server directly or
    # via the crypto middleware. Make a fresh instance for each test to
    # avoid any state coupling.
    conf = {'disable_encryption': disable_encryption}
    self.encryption = crypto.filter_factory(conf)(self.proxy_app)
    self.encryption.logger = self.proxy_app.logger
    km_conf = dict(TEST_KEYMASTER_CONF)
    if root_secret_id is not None:
        km_conf['active_root_secret_id'] = root_secret_id
    self.km = keymaster.KeyMaster(self.encryption, km_conf)
    self.crypto_app = self.km  # for clarity
    self.crypto_app.logger = self.encryption.logger
def _create_container(self, app, policy_name='one', container_path=None):
    """Create (PUT) a container with the given storage policy via *app*.

    When *container_path* is not given, fresh container/object names and
    paths are generated and recorded on ``self`` for use by later
    requests. A follow-up HEAD verifies the policy was applied.
    """
    if not container_path:
        # choose new container name so that the policy can be specified
        self.container_name = uuid.uuid4().hex
        self.container_path = 'http://foo:8080/v1/a/' + self.container_name
        self.object_name = 'o'
        self.object_path = self.container_path + '/' + self.object_name
        container_path = self.container_path
    req = Request.blank(
        str_to_wsgi(container_path), method='PUT',
        headers={'X-Storage-Policy': policy_name})
    resp = req.get_response(app)
    self.assertEqual('201 Created', resp.status)
    # sanity check
    req = Request.blank(
        str_to_wsgi(container_path), method='HEAD',
        headers={'X-Storage-Policy': policy_name})
    resp = req.get_response(app)
    self.assertEqual(policy_name, resp.headers['X-Storage-Policy'])
def _put_object(self, app, body):
    """PUT *body* to self.object_path through *app* and verify success.

    Asserts a 201 status and that the returned Etag matches the plaintext
    etag recorded in setUp(), then returns the response.
    """
    wsgi_path = str_to_wsgi(self.object_path)
    put_headers = {'Content-Type': 'application/test'}
    request = Request.blank(wsgi_path, method='PUT', body=body,
                            headers=put_headers)
    response = request.get_response(app)
    self.assertEqual('201 Created', response.status)
    self.assertEqual(self.plaintext_etag, response.headers['Etag'])
    return response
def _post_object(self, app):
    """POST user metadata (X-Object-Meta-Fruit: Kiwi) through *app*.

    Asserts a 202 status and returns the response.
    """
    post_headers = {'Content-Type': 'application/test',
                    'X-Object-Meta-Fruit': 'Kiwi'}
    request = Request.blank(str_to_wsgi(self.object_path), method='POST',
                            headers=post_headers)
    response = request.get_response(app)
    self.assertEqual('202 Accepted', response.status)
    return response
def _copy_object(self, app, destination):
    """COPY the object to *destination* through *app* and verify success.

    Asserts a 201 status and that the copy's Etag matches the plaintext
    etag, then returns the response.
    """
    copy_headers = {'Destination': destination}
    request = Request.blank(str_to_wsgi(self.object_path), method='COPY',
                            headers=copy_headers)
    response = request.get_response(app)
    self.assertEqual('201 Created', response.status)
    self.assertEqual(self.plaintext_etag, response.headers['Etag'])
    return response
def _check_GET_and_HEAD(self, app, object_path=None):
    """GET then HEAD the object through *app*, verifying each response.

    Both must be 200 and carry the X-Object-Meta-Fruit header; GET must
    return the plaintext body while HEAD must return an empty body.
    """
    path = str_to_wsgi(object_path or self.object_path)
    for verb, expected_body in (('GET', self.plaintext), ('HEAD', b'')):
        request = Request.blank(path, method=verb)
        response = request.get_response(app)
        self.assertEqual('200 OK', response.status)
        self.assertEqual(expected_body, response.body)
        self.assertEqual('Kiwi', response.headers['X-Object-Meta-Fruit'])
def _check_match_requests(self, method, app, object_path=None):
    """Exercise conditional requests (*method* is 'GET' or 'HEAD').

    Covers If-Match and If-None-Match with a matching etag, a wildcard,
    and a non-matching etag. HEAD responses (and any 304/412 response)
    are expected to have an empty body.
    """
    object_path = str_to_wsgi(object_path or self.object_path)
    # verify conditional match requests
    expected_body = self.plaintext if method == 'GET' else b''
    # If-Match matches
    req = Request.blank(object_path, method=method,
                        headers={'If-Match': '"%s"' % self.plaintext_etag})
    resp = req.get_response(app)
    self.assertEqual('200 OK', resp.status)
    self.assertEqual(expected_body, resp.body)
    self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
    self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
    # If-Match wildcard
    req = Request.blank(object_path, method=method,
                        headers={'If-Match': '*'})
    resp = req.get_response(app)
    self.assertEqual('200 OK', resp.status)
    self.assertEqual(expected_body, resp.body)
    self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
    self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
    # If-Match does not match
    req = Request.blank(object_path, method=method,
                        headers={'If-Match': '"not the etag"'})
    resp = req.get_response(app)
    self.assertEqual('412 Precondition Failed', resp.status)
    self.assertEqual(b'', resp.body)
    self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
    # If-None-Match matches
    req = Request.blank(
        object_path, method=method,
        headers={'If-None-Match': '"%s"' % self.plaintext_etag})
    resp = req.get_response(app)
    self.assertEqual('304 Not Modified', resp.status)
    self.assertEqual(b'', resp.body)
    self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
    # If-None-Match wildcard
    req = Request.blank(object_path, method=method,
                        headers={'If-None-Match': '*'})
    resp = req.get_response(app)
    self.assertEqual('304 Not Modified', resp.status)
    self.assertEqual(b'', resp.body)
    self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
    # If-None-Match does not match
    req = Request.blank(object_path, method=method,
                        headers={'If-None-Match': '"not the etag"'})
    resp = req.get_response(app)
    self.assertEqual('200 OK', resp.status)
    self.assertEqual(expected_body, resp.body)
    self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
    self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
def _check_listing(self, app, expect_mismatch=False, container_path=None):
container_path = str_to_wsgi(container_path or self.container_path)
req = Request.blank(
container_path, method='GET', query_string='format=json')
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
listing = json.loads(resp.body)
self.assertEqual(1, len(listing))
self.assertEqual(self.object_name, listing[0]['name'])
self.assertEqual(len(self.plaintext), listing[0]['bytes'])
if expect_mismatch:
self.assertNotEqual(self.plaintext_etag, listing[0]['hash'])
else:
self.assertEqual(self.plaintext_etag, listing[0]['hash'])
    def test_write_with_crypto_and_override_headers(self):
        """Container-update override etag/size values survive encryption.

        Overrides may arrive either as PUT request headers or as object PUT
        footers; in both cases the object sysmeta and the container listing
        must report the override values while the response Etag remains the
        plaintext etag.
        """
        self._create_container(self.proxy_app, policy_name='one')

        def verify_overrides():
            # verify object sysmeta
            req = Request.blank(
                self.object_path, method='GET')
            resp = req.get_response(self.crypto_app)
            for k, v in overrides.items():
                self.assertIn(k, resp.headers)
                self.assertEqual(overrides[k], resp.headers[k])
            # check container listing
            req = Request.blank(
                self.container_path, method='GET', query_string='format=json')
            resp = req.get_response(self.crypto_app)
            self.assertEqual('200 OK', resp.status)
            listing = json.loads(resp.body)
            self.assertEqual(1, len(listing))
            self.assertEqual('o', listing[0]['name'])
            self.assertEqual(
                overrides['x-object-sysmeta-container-update-override-size'],
                str(listing[0]['bytes']))
            self.assertEqual(
                overrides['x-object-sysmeta-container-update-override-etag'],
                listing[0]['hash'])

        # include overrides in headers
        overrides = {'x-object-sysmeta-container-update-override-etag': 'foo',
                     'x-object-sysmeta-container-update-override-size':
                         str(len(self.plaintext) + 1)}
        req = Request.blank(self.object_path, method='PUT',
                            body=self.plaintext, headers=overrides.copy())
        resp = req.get_response(self.crypto_app)
        self.assertEqual('201 Created', resp.status)
        self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
        verify_overrides()

        # include overrides in footers
        overrides = {'x-object-sysmeta-container-update-override-etag': 'bar',
                     'x-object-sysmeta-container-update-override-size':
                         str(len(self.plaintext) + 2)}

        def callback(footers):
            footers.update(overrides)

        req = Request.blank(
            self.object_path, method='PUT', body=self.plaintext)
        req.environ['swift.callback.update_footers'] = callback
        resp = req.get_response(self.crypto_app)
        self.assertEqual('201 Created', resp.status)
        self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
        verify_overrides()
def test_write_with_crypto_read_with_crypto(self):
self._create_container(self.proxy_app, policy_name='one')
self._put_object(self.crypto_app, self.plaintext)
self._post_object(self.crypto_app)
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
def test_write_with_crypto_read_with_crypto_different_root_secrets(self):
root_secret = self.crypto_app.root_secret
self._create_container(self.proxy_app, policy_name='one')
self._put_object(self.crypto_app, self.plaintext)
# change root secret
self._setup_crypto_app(root_secret_id='1')
root_secret_1 = self.crypto_app.root_secret
self.assertNotEqual(root_secret, root_secret_1) # sanity check
self._post_object(self.crypto_app)
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
# change root secret
self._setup_crypto_app(root_secret_id='2')
root_secret_2 = self.crypto_app.root_secret
self.assertNotEqual(root_secret_2, root_secret_1) # sanity check
self.assertNotEqual(root_secret_2, root_secret) # sanity check
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
# write object again
self._put_object(self.crypto_app, self.plaintext)
self._post_object(self.crypto_app)
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
def test_write_with_crypto_read_with_crypto_ec(self):
self._create_container(self.proxy_app, policy_name='ec')
self._put_object(self.crypto_app, self.plaintext)
self._post_object(self.crypto_app)
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
def test_put_without_crypto_post_with_crypto_read_with_crypto(self):
self._create_container(self.proxy_app, policy_name='one')
self._put_object(self.proxy_app, self.plaintext)
self._post_object(self.crypto_app)
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
def test_write_without_crypto_read_with_crypto(self):
self._create_container(self.proxy_app, policy_name='one')
self._put_object(self.proxy_app, self.plaintext)
self._post_object(self.proxy_app)
self._check_GET_and_HEAD(self.proxy_app) # sanity check
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.proxy_app) # sanity check
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.proxy_app) # sanity check
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
def test_write_without_crypto_read_with_crypto_ec(self):
self._create_container(self.proxy_app, policy_name='ec')
self._put_object(self.proxy_app, self.plaintext)
self._post_object(self.proxy_app)
self._check_GET_and_HEAD(self.proxy_app) # sanity check
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.proxy_app) # sanity check
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.proxy_app) # sanity check
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
def _check_GET_and_HEAD_not_decrypted(self, app):
req = Request.blank(self.object_path, method='GET')
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertNotEqual(self.plaintext, resp.body)
self.assertEqual('%s' % len(self.plaintext),
resp.headers['Content-Length'])
self.assertNotEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
req = Request.blank(self.object_path, method='HEAD')
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertEqual(b'', resp.body)
self.assertNotEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
    def test_write_with_crypto_read_without_crypto(self):
        """An encrypted object read without the crypto middleware yields
        ciphertext and a mismatching container listing etag."""
        self._create_container(self.proxy_app, policy_name='one')
        self._put_object(self.crypto_app, self.plaintext)
        self._post_object(self.crypto_app)
        self._check_GET_and_HEAD(self.crypto_app)  # sanity check
        # without crypto middleware, GET and HEAD returns ciphertext
        self._check_GET_and_HEAD_not_decrypted(self.proxy_app)
        self._check_listing(self.proxy_app, expect_mismatch=True)
    def test_write_with_crypto_read_without_crypto_ec(self):
        """Same as test_write_with_crypto_read_without_crypto, but using an
        erasure-coded storage policy."""
        self._create_container(self.proxy_app, policy_name='ec')
        self._put_object(self.crypto_app, self.plaintext)
        self._post_object(self.crypto_app)
        self._check_GET_and_HEAD(self.crypto_app)  # sanity check
        # without crypto middleware, GET and HEAD returns ciphertext
        self._check_GET_and_HEAD_not_decrypted(self.proxy_app)
        self._check_listing(self.proxy_app, expect_mismatch=True)
def test_disable_encryption_config_option(self):
# check that on disable_encryption = true, object is not encrypted
self._setup_crypto_app(disable_encryption=True)
self._create_container(self.proxy_app, policy_name='one')
self._put_object(self.crypto_app, self.plaintext)
self._post_object(self.crypto_app)
self._check_GET_and_HEAD(self.crypto_app)
# check as if no crypto middleware exists
self._check_GET_and_HEAD(self.proxy_app)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
self._check_match_requests('GET', self.proxy_app)
self._check_match_requests('HEAD', self.proxy_app)
def test_write_with_crypto_read_with_disable_encryption_conf(self):
self._create_container(self.proxy_app, policy_name='one')
self._put_object(self.crypto_app, self.plaintext)
self._post_object(self.crypto_app)
self._check_GET_and_HEAD(self.crypto_app) # sanity check
# turn on disable_encryption config option
self._setup_crypto_app(disable_encryption=True)
# GET and HEAD of encrypted objects should still work
self._check_GET_and_HEAD(self.crypto_app)
self._check_listing(self.crypto_app, expect_mismatch=False)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
    def _test_ondisk_data_after_write_with_crypto(self, policy_name):
        """Write via crypto, then inspect what actually landed on disk.

        Verifies, by direct requests to a container server and by reading
        each node's diskfile, that the listing etag, object body, user
        metadata, object etag and the container-update override etag are
        all stored encrypted, and that the crypto meta records the key path
        and cipher.

        Returns a tuple of (expected encrypted body, list of
        (node, contents) pairs read from each node's diskfile).
        """
        policy = storage_policy.POLICIES.get_by_name(policy_name)
        self._create_container(self.proxy_app, policy_name=policy_name)
        self._put_object(self.crypto_app, self.plaintext)
        self._post_object(self.crypto_app)
        # Verify container listing etag is encrypted by direct GET to container
        # server. We can use any server for all nodes since they all share same
        # devices dir.
        cont_server = self._test_context['test_servers'][3]
        cont_ring = Ring(self._test_context['testdir'], ring_name='container')
        part, nodes = cont_ring.get_nodes('a', self.container_name)
        for node in nodes:
            req = Request.blank('/%s/%s/a/%s'
                                % (node['device'], part, self.container_name),
                                method='GET', query_string='format=json')
            resp = req.get_response(cont_server)
            listing = json.loads(resp.body)
            # sanity checks...
            self.assertEqual(1, len(listing))
            self.assertEqual('o', listing[0]['name'])
            self.assertEqual('application/test', listing[0]['content_type'])
            # verify encrypted etag value: the stored hash is
            # <ciphertext>; swift_meta=<crypto meta with the iv>
            parts = listing[0]['hash'].rsplit(';', 1)
            crypto_meta_param = parts[1].strip()
            crypto_meta = crypto_meta_param[len('swift_meta='):]
            listing_etag_iv = load_crypto_meta(crypto_meta)['iv']
            exp_enc_listing_etag = base64.b64encode(
                encrypt(self.plaintext_etag.encode('ascii'),
                        self.km.create_key('/a/%s' % self.container_name),
                        listing_etag_iv)).decode('ascii')
            self.assertEqual(exp_enc_listing_etag, parts[0])
        # Verify diskfile data and metadata is encrypted
        ring_object = self.proxy_app.get_object_ring(int(policy))
        partition, nodes = ring_object.get_nodes('a', self.container_name, 'o')
        conf = {'devices': self._test_context["testdir"],
                'mount_check': 'false'}
        df_mgr = diskfile.DiskFileRouter(conf, debug_logger())[policy]
        ondisk_data = []
        exp_enc_body = None
        for node_index, node in enumerate(nodes):
            df = df_mgr.get_diskfile(node['device'], partition,
                                     'a', self.container_name, 'o',
                                     policy=policy)
            with df.open():
                meta = df.get_metadata()
                contents = b''.join(df.reader())
            metadata = dict((k.lower(), v) for k, v in meta.items())
            # verify on disk data - body
            body_iv = load_crypto_meta(
                metadata['x-object-sysmeta-crypto-body-meta'])['iv']
            body_key_meta = load_crypto_meta(
                metadata['x-object-sysmeta-crypto-body-meta'])['body_key']
            obj_key = self.km.create_key('/a/%s/o' % self.container_name)
            # the body key is wrapped with the object key in the crypto meta
            body_key = Crypto().unwrap_key(obj_key, body_key_meta)
            exp_enc_body = encrypt(self.plaintext, body_key, body_iv)
            ondisk_data.append((node, contents))
            # verify on disk user metadata
            enc_val, meta = metadata[
                'x-object-transient-sysmeta-crypto-meta-fruit'].split(';')
            meta = meta.strip()[len('swift_meta='):]
            metadata_iv = load_crypto_meta(meta)['iv']
            exp_enc_meta = base64.b64encode(encrypt(
                b'Kiwi', obj_key, metadata_iv)).decode('ascii')
            self.assertEqual(exp_enc_meta, enc_val)
            self.assertNotIn('x-object-meta-fruit', metadata)
            self.assertIn(
                'x-object-transient-sysmeta-crypto-meta', metadata)
            meta = load_crypto_meta(
                metadata['x-object-transient-sysmeta-crypto-meta'])
            # the key_id records the path and secret version used to derive
            # the key so the object stays readable after secret rotation
            self.assertIn('key_id', meta)
            self.assertIn('path', meta['key_id'])
            self.assertEqual(
                '/a/%s/%s' % (self.container_name, self.object_name),
                meta['key_id']['path'])
            self.assertIn('v', meta['key_id'])
            self.assertEqual('2', meta['key_id']['v'])
            self.assertIn('cipher', meta)
            self.assertEqual(Crypto.cipher, meta['cipher'])
            # verify etag
            actual_enc_etag, _junk, actual_etag_meta = metadata[
                'x-object-sysmeta-crypto-etag'].partition('; swift_meta=')
            etag_iv = load_crypto_meta(actual_etag_meta)['iv']
            exp_enc_etag = base64.b64encode(encrypt(
                self.plaintext_etag.encode('ascii'),
                obj_key, etag_iv)).decode('ascii')
            self.assertEqual(exp_enc_etag, actual_enc_etag)
            # verify etag hmac
            exp_etag_mac = hmac.new(
                obj_key, self.plaintext_etag.encode('ascii'),
                digestmod=hashlib.sha256).digest()
            exp_etag_mac = base64.b64encode(exp_etag_mac).decode('ascii')
            self.assertEqual(exp_etag_mac,
                             metadata['x-object-sysmeta-crypto-etag-mac'])
            # verify etag override for container updates
            override = 'x-object-sysmeta-container-update-override-etag'
            parts = metadata[override].rsplit(';', 1)
            crypto_meta_param = parts[1].strip()
            crypto_meta = crypto_meta_param[len('swift_meta='):]
            listing_etag_iv = load_crypto_meta(crypto_meta)['iv']
            # the override etag is encrypted with the *container* key
            cont_key = self.km.create_key('/a/%s' % self.container_name)
            exp_enc_listing_etag = base64.b64encode(
                encrypt(self.plaintext_etag.encode('ascii'), cont_key,
                        listing_etag_iv)).decode('ascii')
            self.assertEqual(exp_enc_listing_etag, parts[0])
        self._check_GET_and_HEAD(self.crypto_app)
        return exp_enc_body, ondisk_data
def test_ondisk_data_after_write_with_crypto(self):
exp_body, ondisk_data = self._test_ondisk_data_after_write_with_crypto(
policy_name='one')
for node, body in ondisk_data:
self.assertEqual(exp_body, body)
def test_ondisk_data_after_write_with_crypto_ec(self):
exp_body, ondisk_data = self._test_ondisk_data_after_write_with_crypto(
policy_name='ec')
policy = storage_policy.POLICIES.get_by_name('ec')
for frag_selection in (ondisk_data[:2], ondisk_data[1:]):
frags = [frag for node, frag in frag_selection]
self.assertEqual(exp_body, policy.pyeclib_driver.decode(frags))
def _test_copy_encrypted_to_encrypted(
self, src_policy_name, dest_policy_name):
self._create_container(self.proxy_app, policy_name=src_policy_name)
self._put_object(self.crypto_app, self.plaintext)
self._post_object(self.crypto_app)
copy_crypto_app = copy.ServerSideCopyMiddleware(self.crypto_app, {})
dest_container = uuid.uuid4().hex
dest_container_path = 'http://localhost:8080/v1/a/' + dest_container
self._create_container(copy_crypto_app, policy_name=dest_policy_name,
container_path=dest_container_path)
dest_obj_path = dest_container_path + '/o'
dest = '/%s/%s' % (dest_container, 'o')
self._copy_object(copy_crypto_app, dest)
self._check_GET_and_HEAD(copy_crypto_app, object_path=dest_obj_path)
self._check_listing(
copy_crypto_app, container_path=dest_container_path)
self._check_match_requests(
'GET', copy_crypto_app, object_path=dest_obj_path)
self._check_match_requests(
'HEAD', copy_crypto_app, object_path=dest_obj_path)
def test_copy_encrypted_to_encrypted(self):
self._test_copy_encrypted_to_encrypted('ec', 'ec')
self._test_copy_encrypted_to_encrypted('one', 'ec')
self._test_copy_encrypted_to_encrypted('ec', 'one')
self._test_copy_encrypted_to_encrypted('one', 'one')
def _test_copy_encrypted_to_unencrypted(
self, src_policy_name, dest_policy_name):
self._create_container(self.proxy_app, policy_name=src_policy_name)
self._put_object(self.crypto_app, self.plaintext)
self._post_object(self.crypto_app)
# make a pipeline with encryption disabled, use it to copy object
self._setup_crypto_app(disable_encryption=True)
copy_app = copy.ServerSideCopyMiddleware(self.crypto_app, {})
dest_container = uuid.uuid4().hex
dest_container_path = 'http://localhost:8080/v1/a/' + dest_container
self._create_container(self.crypto_app, policy_name=dest_policy_name,
container_path=dest_container_path)
dest_obj_path = dest_container_path + '/o'
dest = '/%s/%s' % (dest_container, 'o')
self._copy_object(copy_app, dest)
self._check_GET_and_HEAD(copy_app, object_path=dest_obj_path)
self._check_GET_and_HEAD(self.proxy_app, object_path=dest_obj_path)
self._check_listing(copy_app, container_path=dest_container_path)
self._check_listing(self.proxy_app, container_path=dest_container_path)
self._check_match_requests(
'GET', self.proxy_app, object_path=dest_obj_path)
self._check_match_requests(
'HEAD', self.proxy_app, object_path=dest_obj_path)
def test_copy_encrypted_to_unencrypted(self):
self._test_copy_encrypted_to_unencrypted('ec', 'ec')
self._test_copy_encrypted_to_unencrypted('one', 'ec')
self._test_copy_encrypted_to_unencrypted('ec', 'one')
self._test_copy_encrypted_to_unencrypted('one', 'one')
def _test_copy_unencrypted_to_encrypted(
self, src_policy_name, dest_policy_name):
self._create_container(self.proxy_app, policy_name=src_policy_name)
self._put_object(self.proxy_app, self.plaintext)
self._post_object(self.proxy_app)
copy_crypto_app = copy.ServerSideCopyMiddleware(self.crypto_app, {})
dest_container = uuid.uuid4().hex
dest_container_path = 'http://localhost:8080/v1/a/' + dest_container
self._create_container(copy_crypto_app, policy_name=dest_policy_name,
container_path=dest_container_path)
dest_obj_path = dest_container_path + '/o'
dest = '/%s/%s' % (dest_container, 'o')
self._copy_object(copy_crypto_app, dest)
self._check_GET_and_HEAD(copy_crypto_app, object_path=dest_obj_path)
self._check_listing(
copy_crypto_app, container_path=dest_container_path)
self._check_match_requests(
'GET', copy_crypto_app, object_path=dest_obj_path)
self._check_match_requests(
'HEAD', copy_crypto_app, object_path=dest_obj_path)
def test_copy_unencrypted_to_encrypted(self):
self._test_copy_unencrypted_to_encrypted('ec', 'ec')
self._test_copy_unencrypted_to_encrypted('one', 'ec')
self._test_copy_unencrypted_to_encrypted('ec', 'one')
self._test_copy_unencrypted_to_encrypted('one', 'one')
def test_crypto_max_length_path(self):
# the path is stashed in the key_id in crypto meta; check that a long
# path is ok
self.container_name = 'c' * constraints.MAX_CONTAINER_NAME_LENGTH
self.object_name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
self.container_path = 'http://foo:8080/v1/a/' + self.container_name
self.object_path = '%s/%s' % (self.container_path, self.object_name)
self._create_container(self.proxy_app, policy_name='one',
container_path=self.container_path)
self._put_object(self.crypto_app, self.plaintext)
self._post_object(self.crypto_app)
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
def test_crypto_UTF8_path(self):
# check that UTF8 path is ok
self.container_name = self.object_name = u'\u010brypto'
self.container_path = 'http://foo:8080/v1/a/' + self.container_name
self.object_path = '%s/%s' % (self.container_path, self.object_name)
self._create_container(self.proxy_app, policy_name='one',
container_path=self.container_path)
self._put_object(self.crypto_app, self.plaintext)
self._post_object(self.crypto_app)
self._check_GET_and_HEAD(self.crypto_app)
self._check_match_requests('GET', self.crypto_app)
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to replicate model_fn's over local GPUs.
This file contains utilities that allow replicating `Estimator.model_fn` over
GPUs. A replicated version of a `model_fn` is returned that can subsequently
be used with `Estimator`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import copy
import six
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import device as framework_device
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import device_setter as device_setter_lib
from tensorflow.python.training import optimizer as optimizer_lib
def replicate_model_fn(model_fn,
                       loss_reduction=losses.Reduction.SUM,
                       devices=None):
  """Replicate `Estimator.model_fn` over GPUs within a single host.

  The given `model_fn` specifies a single forward pass of a model. To replicate
  such a model over GPUs, each GPU gets its own instance of the forward pass
  (a.k.a. a tower). The input features and labels get sharded into the chunks
  that correspond to the number of GPUs. Each tower computes a loss based
  on its input. For each such loss, gradients are computed. After that, the
  available losses are aggregated to form aggregated loss. Available
  gradients are summed. Then, they update weights using the specified
  optimizer.

  If `devices` are `None`, then all available GPUs are going to be used for
  replication. If no GPUs are available, then the model is going to be
  placed on the CPU.

  Two modes of local replication over available GPUs are supported:
    1) If exactly 1 GPU is detected, then variables and operations are placed
       onto GPU.
    2) If more than 1 GPU is detected, then variables are going to be placed on
       the CPU. Replicas of operations are placed on each individual GPU.

  Here is an example of how one might use their `model_fn` to run over GPUs:

  ```python
     ...
     def model_fn(...):  # See `model_fn` in `Estimator`.
       loss = ...
       optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
       optimizer = tf.contrib.estimator.GatheringOptimizer(optimizer)
       if mode == tf.estimator.ModeKeys.TRAIN:
         #  See the section below on `EstimatorSpec.train_op`.
         return EstimatorSpec(mode=mode, loss=loss,
                              train_op=optimizer.minimize(loss))

       #  No change for `ModeKeys.EVAL` or `ModeKeys.PREDICT`.
       return EstimatorSpec(...)
     ...
     classifier = tf.estimator.Estimator(
       model_fn=tf.contrib.estimator.replicate_model_fn(model_fn))
  ```

  On `EstimatorSpec.train_op`:
  `model_fn` returns `EstimatorSpec.train_op` for
  `tf.estimator.GraphKeys.TRAIN`. It is typically derived using an optimizer.
  Towers are expected to populate it in the same way. Gradients from all towers
  are reduced and applied in the last tower. To achieve that,
  `GatheringOptimizer` needs to be used. See `GatheringOptimizer`.

  On sharding input features and labels:
  Input features and labels are split for consumption by each tower. They are
  split across the dimension 0. Features and labels need to be batch major.

  On reduction algorithms:
  Certain algorithms were chosen for aggregating results of computations on
  multiple towers:
    - Losses from all towers are reduced according to `loss_reduction`.
    - Gradients are reduced using sum for each trainable variable.
    - `eval_metrics_ops` are reduced per metric using `reduce_mean`.
    - `EstimatorSpec.predictions` and `EstimatorSpec.export_outputs` are
      reduced using concatenation.
    - For all other fields of `EstimatorSpec` the values of the first tower
      are taken.

  On distribution of variables:
  Variables are not duplicated between towers. Instead, they are placed on a
  single device as defined above and shared across towers.

  Other current limitations:
    - `predictions` are not supported for `ModeKeys.EVAL`. That is required for
      `tf.contrib.estimator.add_metrics`.

  Args:
    model_fn: `model_fn` as defined in `Estimator`. See the section above about
      the train_op argument of `EstimatorSpec`.
    loss_reduction: controls whether losses are summed or averaged.
    devices: Optional list of devices to replicate the model across. This
      argument can be used to replicate only on the subset of available GPUs.
      If `None`, then all available GPUs are going to be used for replication.
      If no GPUs are available, then the model is going to be placed on the
      CPU.

  Returns:
    A replicated version of the supplied `model_fn`. Returned function that
    conforms to the requirements of `Estimator`'s `model_fn` and can be used
    instead of the supplied `model_fn`.
  """
  return _replicate_model_fn_with_mode(
      model_fn,
      loss_reduction,
      devices,
      # TODO(isaprykin): Query the system configuration to choose modes other
      # than `SHARED_LOCAL_PARAMETER_SERVER`, even though it is often
      # appropriate.
      mode=_VariableDistributionMode.SHARED_LOCAL_PARAMETER_SERVER)
class _VariableDistributionMode(object):
  """Modes for variable distribution used for forcing a particular one.

  Forcing a mode is meant for performance experimentation purposes rather than
  for general use cases.
  """

  SHARED_LOCAL_PARAMETER_SERVER = 1
  """Variables are placed on a single device and shared across all devices.

  Two ways to achieve this distribution over available GPUs are supported:
    1) If exactly 1 GPU is detected, then variables and operations are placed
       onto GPU.
    2) If more than 1 GPU is detected, then variables are going to be placed on
       the CPU. Replicas of operations are placed on each individual GPU.
  """

  SHARED_ROUND_ROBIN = 2
  """Variables are placed on all devices in a round-robin fashion.

  Every subsequent variable is placed on the next device. There is only one
  copy of each variable that is shared across all devices.
  """
def _replicate_model_fn_with_mode(
    model_fn,
    loss_reduction=losses.Reduction.SUM,
    devices=None,
    mode=_VariableDistributionMode.SHARED_LOCAL_PARAMETER_SERVER):
  """A version of `replicate_model_fn` that allows to specify a `mode`.

  Args:
    model_fn: `model_fn` as defined in `Estimator`.
    loss_reduction: controls whether losses are summed or averaged.
    devices: Optional list of devices to replicate the model across.
    mode: a `_VariableDistributionMode` that forces how variables are
      distributed across devices.

  Returns:
    A replicated version of the supplied `model_fn`.

  Raises:
    ValueError: if `loss_reduction` is `losses.Reduction.NONE`.
  """
  if loss_reduction == losses.Reduction.NONE:
    raise ValueError('Tower losses need to be reduced in some way, yet {} '
                     'reduction is specified.'.format(loss_reduction))
  if not devices:
    # Prefer all local GPUs; fall back to the CPU when none are available.
    devices = _get_local_devices('GPU') or _get_local_devices('CPU')
  # With exactly one GPU everything is consolidated on that GPU; otherwise
  # the consolidation (and default parameter-server) device is the CPU.
  is_a_single_gpu_case = len(devices) == 1 and 'GPU' in devices[0]
  consolidation_device = '/{}:0'.format('GPU'
                                        if is_a_single_gpu_case else 'CPU')
  ps_devices = [consolidation_device]
  if mode == _VariableDistributionMode.SHARED_ROUND_ROBIN:
    ps_devices = devices
  tf_logging.info('Replicating the `model_fn` across {}. Variables are going '
                  'to be placed on {}. Consolidation device is going to be {}.'
                  .format(devices, ps_devices, consolidation_device))

  def replicated_model_fn(features, labels, mode, params=None, config=None):
    """Replicated version of `model_fn` to be used instead."""
    # Shard the input batch across towers along dimension 0.
    feature_shards, label_shards = _split_batch(
        features, labels, len(devices), device=consolidation_device)
    # Build one tower per device, each producing its own EstimatorSpec.
    tower_specs = _get_loss_towers(
        model_fn=model_fn,
        mode=mode,
        features=feature_shards,
        labels=label_shards,
        params=params,
        loss_reduction=loss_reduction,
        config=config,
        devices=devices,
        local_ps_devices=ps_devices)

    if mode == model_fn_lib.ModeKeys.TRAIN:
      train_op = _minimize_towers(tower_specs)
      return _train_spec(
          tower_specs, train_op, aggregation_device=consolidation_device)
    elif mode == model_fn_lib.ModeKeys.EVAL:
      return _eval_spec(tower_specs, aggregation_device=consolidation_device)
    elif mode == model_fn_lib.ModeKeys.PREDICT:
      return _predict_spec(tower_specs, aggregation_device=consolidation_device)

  return replicated_model_fn
class GatheringOptimizer(optimizer_lib.Optimizer):
  """Gathers gradients from all towers and reduces them in the last one."""

  # Graph collection key under which a single _PerGraphState instance is
  # stored for the default graph (see `_graph_state`).
  COLLECTION_FOR_GRAPH_STATES = 'replicate_model_fn_graph_states'
  def __init__(self, optimizer_or_optimizer_fn):
    """Wrap an existing optimizer for gathering gradients across towers.

    Each invocation of model_fn has to call optimizers in the same order.

    Multiple optimizers that use the same or different losses are supported.
    Optimizers, however, need to be of different type as per `__class__`
    in order to increment the global_step correctly.

    Args:
      optimizer_or_optimizer_fn: an instance of optimizer to wrap. That
        instance is going to be used for optimizer-specific logic. This can
        also be a no-argument function that returns such an optimizer instance.
    """
    # If a function was given it is replaced lazily by the constructed
    # optimizer inside `_get_optimizer`.
    self._optimizer_or_optimizer_fn = optimizer_or_optimizer_fn
  @staticmethod
  def has_been_used():
    """Return True once `_get_optimizer` has run for the current graph."""
    return GatheringOptimizer._graph_state().has_gathering_optimizer_been_used

  def get_slot(self, *args, **kwargs):
    # Delegate slot access to the wrapped optimizer.
    return self._get_optimizer().get_slot(*args, **kwargs)

  def get_slot_names(self, *args, **kwargs):
    # Delegate slot-name listing to the wrapped optimizer.
    return self._get_optimizer().get_slot_names(*args, **kwargs)

  def get_name(self, *args, **kwargs):
    # Delegate name lookup to the wrapped optimizer.
    return self._get_optimizer().get_name(*args, **kwargs)

  def variables(self, *args, **kwargs):
    # Delegate variable listing to the wrapped optimizer.
    return self._get_optimizer().variables(*args, **kwargs)
  def compute_gradients(self, loss, *args, **kwargs):
    """Compute gradients, but first, if needed, scale the loss.

    The scaling factor depends on the configured loss reduction and the
    number of towers (see `_scale_loss`).
    """
    loss = _scale_loss(loss,
                       self._graph_state().loss_reduction,
                       self._graph_state().number_of_towers)
    return self._get_optimizer().compute_gradients(loss, *args, **kwargs)
  def apply_gradients(self, grads_and_vars, global_step=None, **kwargs):
    """Collect gradients updates to apply them with the last tower.

    Non-final towers only record their gradients on the per-graph state and
    receive a no-op train_op; the last tower aggregates and applies all of
    them via `_apply_gathered_gradients`.
    """
    self._graph_state().collect_gradients(grads_and_vars,
                                          self._get_optimizer())
    if not self._graph_state().is_the_last_tower:
      return self._construct_no_op_train_op()
    else:
      # Gradients need to be gathered and applied in the scope of the first
      # tower, so that the tensors are accessible via names without prefixes.
      var_scope, name_scope = self._graph_state().scopes_of_the_first_tower
      with variable_scope.variable_scope(var_scope):
        with ops_lib.name_scope(name_scope):
          return self._apply_gathered_gradients(global_step, **kwargs)
  def _apply_gathered_gradients(self, global_step, **kwargs):
    """Sum the gradients gathered from all towers and apply them.

    Per-variable gradients are summed on the variable's own device before
    being applied by the wrapped optimizer. `global_step` is incremented at
    most once, after the last optimizer type used within a tower.
    """
    graph_state = self._graph_state()
    optimizer = self._get_optimizer()
    train_ops = []
    grad_lists = {}
    # Only aggregate gradients for `optimizer.__class__` type of Optimizer.
    for grad, var in graph_state.get_grad_and_vars_for_optimizer(optimizer):
      if grad is not None:
        grad_lists.setdefault(var, []).append(grad)
    aggregated_grads = []
    with ops_lib.name_scope('gradient_aggregating'):
      for var, grads in six.iteritems(grad_lists):
        grad = _compute_sum_on_device(grads, var.device)
        aggregated_grads.append((grad, var))
    train_ops.append(optimizer.apply_gradients(aggregated_grads))
    # A model might use multiple optimizers. We only want to increment global
    # step after apply_gradients of the last optimizer inside the tower.
    if global_step and graph_state.is_the_last_optimizer_within_a_tower(
        optimizer):
      with ops_lib.control_dependencies(train_ops):
        with ops_lib.colocate_with(global_step):
          return state_ops.assign_add(global_step, 1)
    else:
      return control_flow_ops.group(train_ops)
  def _get_optimizer(self):
    """Return the wrapped optimizer, constructing it lazily if needed."""
    if not isinstance(self._optimizer_or_optimizer_fn, optimizer_lib.Optimizer):
      # If optimizer is given as a function then we need to wait till we are
      # under the right graph context before constructing it.
      self._optimizer_or_optimizer_fn = self._optimizer_or_optimizer_fn()
    # Record on the per-graph state that this wrapper has been exercised
    # (surfaced via `has_been_used`).
    self._graph_state().has_gathering_optimizer_been_used = True
    return self._optimizer_or_optimizer_fn
  def _construct_no_op_train_op(self):
    """Return a placeholder train_op for towers that do not apply grads."""
    return control_flow_ops.no_op(name='train_op_placeholder')
  @staticmethod
  def _graph_state():
    """Return the default graph's _PerGraphState, creating it on demand."""
    graph_states = ops_lib.get_default_graph().get_collection_ref(
        GatheringOptimizer.COLLECTION_FOR_GRAPH_STATES)
    if not graph_states:
      graph_states.append(GatheringOptimizer._PerGraphState())
    return graph_states[-1]
  @staticmethod
  def _clear_graph_state():
    """Drop the per-graph state from the default graph's collections."""
    # Clearing the Graph collection will prevent _PerGraphState from being
    # serialized.
    ops_lib.get_default_graph().clear_collection(
        GatheringOptimizer.COLLECTION_FOR_GRAPH_STATES)
class _PerGraphState(object):
"""Gradient reduction related state of a Tensorflow graph."""
def __init__(self):
# For every type of optimizer, collect all gradients and variables.
self._optimizer_grads_and_vars = {}
# In what order were optimizers invoked within each tower?
self._ordered_optimizer_types = []
self._number_of_towers = 0
self._is_the_last_tower = False
self._loss_reduction = None
# Scopes of the first tower that don't have a prefix:
self._variable_scope = None
self._name_scope = None
# If needed, alert that GatheringOptimizer needs to be used with model_fn.
self._has_gathering_optimizer_been_used = False
def collect_gradients(self, grads_and_vars, optimizer):
if optimizer.__class__ not in self._ordered_optimizer_types:
self._ordered_optimizer_types.append(optimizer.__class__)
self._optimizer_grads_and_vars.setdefault(optimizer.__class__,
[]).extend(grads_and_vars)
def get_grad_and_vars_for_optimizer(self, optimizer):
return self._optimizer_grads_and_vars[optimizer.__class__]
def set_reduction_across_towers(self, loss_reduction, number_of_towers):
self._loss_reduction = loss_reduction
self._number_of_towers = number_of_towers
@contextmanager
def tower(self, tower_id, var_scope, name_scope):
if tower_id == 0:
self._variable_scope = var_scope
self._name_scope = name_scope
if tower_id == (self._number_of_towers - 1):
self._is_the_last_tower = True
yield
self._is_the_last_tower = False
@property
def scopes_of_the_first_tower(self):
return self._variable_scope, self._name_scope
@property
def is_the_last_tower(self):
return self._is_the_last_tower
def is_the_last_optimizer_within_a_tower(self, optimizer):
return optimizer.__class__ == self._ordered_optimizer_types[-1]
@property
def number_of_towers(self):
return self._number_of_towers
@property
def loss_reduction(self):
return self._loss_reduction
@property
def has_gathering_optimizer_been_used(self):
return self._has_gathering_optimizer_been_used
@has_gathering_optimizer_been_used.setter
def has_gathering_optimizer_been_used(self, value):
self._has_gathering_optimizer_been_used = value
def _get_local_devices(device_type):
  """Return the names of all local devices of `device_type` ('CPU'/'GPU')."""
  matching_names = []
  for device in device_lib.list_local_devices():
    if device.device_type == device_type:
      matching_names.append(device.name)
  return matching_names
def _split_batch(features, labels, number_of_shards, device):
  """Split input features and labels into per-tower shards along axis 0.

  Args:
    features: Tensor or dict of tensors to split.
    labels: Optional tensor or dict of tensors, split the same way.
    number_of_shards: Number of shards (towers) to produce.
    device: Device on which the split ops are placed.

  Returns:
    Tuple `(feature_shards, label_shards)`; `label_shards` is None when
    `labels` is None.
  """

  def split_dictionary(dictionary):
    """Split a dictionary into shards."""
    shards = [{} for _ in range(number_of_shards)]
    for name, tensor in six.iteritems(dictionary):
      if isinstance(tensor, sparse_tensor.SparseTensor):
        # SparseTensors need a dedicated split op.
        for i, shard in enumerate(
            sparse_ops.sparse_split(
                sp_input=tensor, num_split=number_of_shards, axis=0)):
          shards[i][name] = shard
      else:
        for i, shard in enumerate(array_ops.split(tensor, number_of_shards)):
          shards[i][name] = shard
    return shards

  with ops_lib.name_scope('split_inputs'):
    with ops_lib.device(device):
      if isinstance(features, dict):
        feature_shards = split_dictionary(features)
      else:
        feature_shards = array_ops.split(features, number_of_shards)

      if labels is None:
        label_shards = None
      elif isinstance(labels, dict):
        label_shards = split_dictionary(labels)
      else:
        label_shards = array_ops.split(labels, number_of_shards)
  return feature_shards, label_shards
# Name-scope prefix applied to every tower except the first (which stays
# unprefixed so user-visible names resolve to first-tower ops).
_DEFAULT_NAME_SCOPE_PATTERN = 'tower_{}'


def _get_loss_towers(model_fn,
                     mode,
                     features,
                     labels,
                     params,
                     config,
                     devices,
                     local_ps_devices,
                     loss_reduction=losses.Reduction.SUM,
                     name_scope_pattern=_DEFAULT_NAME_SCOPE_PATTERN):
  """Replicate the loss computation across devices.

  Calls `model_fn` once per entry in `devices` ("towers") on the matching
  shard of `features`/`labels`, reusing variables after the first tower and
  placing variables on `local_ps_devices` round-robin.

  Returns:
    A list of per-tower `EstimatorSpec`s with losses scaled according to
    `loss_reduction`.
  """
  tower_specs = []

  model_fn_args = util.fn_args(model_fn)
  optional_params = {}
  # Only pass params/config when the user's model_fn declares them.
  if 'params' in model_fn_args:
    optional_params['params'] = copy.deepcopy(params)
  if 'config' in model_fn_args:
    optional_params['config'] = copy.deepcopy(config)

  # pylint: disable=protected-access
  round_robin_strategy = device_setter_lib._RoundRobinStrategy(
      num_tasks=len(local_ps_devices))
  GatheringOptimizer._graph_state().set_reduction_across_towers(
      loss_reduction, len(devices))

  for i, device in enumerate(devices):
    is_the_first_tower = (i == 0)

    device_setter = _local_device_setter(
        worker_device=device,
        ps_devices=local_ps_devices,
        ps_strategy=round_robin_strategy)

    # We would like to preserve the names of the variables and ops that the user
    # might be relying on. Names without a prefix are going to resolve to
    # variables and ops of the first tower.
    name_scope = name_scope_pattern
    if is_the_first_tower:
      name_scope = ''

    with variable_scope.variable_scope(
        '', reuse=not is_the_first_tower) as var_scope:
      with ops_lib.name_scope(name_scope.format(i)) as name_scope:
        with GatheringOptimizer._graph_state().tower(
            tower_id=i, var_scope=var_scope, name_scope=name_scope):
          with ops_lib.device(device_setter):
            labels_shard = None
            if labels:
              labels_shard = labels[i]

            tower_spec = model_fn(
                mode=mode,
                features=features[i],
                labels=labels_shard,
                **optional_params)

            if (tower_spec.train_op is not None and
                not GatheringOptimizer.has_been_used()):
              raise ValueError('Please wrap optimizers with GatheringOptimizer'
                               ' in order to use replicate_model_fn.')

            # Scaling the loss here doesn't actually affect gradients. Another
            # instance of scaling happens inside the GatheringOptimizer.
            tower_spec = _scale_tower_loss(
                tower_spec, loss_reduction, number_of_towers=len(devices))
            tower_specs.append(tower_spec)

  GatheringOptimizer._clear_graph_state()
  # pylint: enable=protected-access
  return tower_specs
def _local_device_setter(worker_device, ps_devices, ps_strategy):
  """Return a device function placing Variables on ps and other ops on workers.

  Args:
    worker_device: Device string for non-variable ops.
    ps_devices: List of parameter-server device strings for variables.
    ps_strategy: Callable mapping an op to an index into `ps_devices`.

  Returns:
    A device-chooser function usable with `tf.device`.
  """
  ps_ops = ['Variable', 'VariableV2', 'VarHandleOp']

  def local_device_chooser(op):
    # Merge with any device the op already requested, so explicit placement
    # made by the user is respected.
    current_device = framework_device.DeviceSpec.from_string(op.device or '')

    node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
    if node_def.op in ps_ops:
      ps_device_spec = framework_device.DeviceSpec.from_string(
          '{}'.format(ps_devices[ps_strategy(op)]))

      ps_device_spec.merge_from(current_device)
      return ps_device_spec.to_string()
    else:
      worker_device_spec = framework_device.DeviceSpec.from_string(
          worker_device or '')
      worker_device_spec.merge_from(current_device)
      return worker_device_spec.to_string()
  return local_device_chooser
def _scale_tower_loss(tower_spec, loss_reduction, number_of_towers):
  """Produce an EstimatorSpec with appropriately scaled loss."""
  if tower_spec.loss is None:
    return tower_spec

  # EstimatorSpec is immutable; rebuild it with only the loss replaced.
  estimator_spec = _asdict(tower_spec)
  estimator_spec['loss'] = _scale_loss(tower_spec.loss, loss_reduction,
                                       number_of_towers)
  return model_fn_lib.EstimatorSpec(**estimator_spec)
def _scale_loss(loss, loss_reduction, number_of_towers):
  """If needed, scale down the loss for averaging loss by summing."""
  if loss is None:
    return None
  if loss_reduction == losses.Reduction.SUM:
    # Summed losses are aggregated as-is; no per-tower averaging needed.
    return loss
  return math_ops.div(loss, 1.0 * number_of_towers, name='averaged_loss')
def _minimize_towers(tower_specs):
"""`train_op` of the last tower applies aggregated gradients."""
return tower_specs[-1].train_op
def _compute_sum_on_device(values, device, name=None):
  """Sum `values` (Tensors or IndexedSlices) on the given device.

  IndexedSlices are combined by concatenating values and indices rather than
  densifying, which keeps sparse gradients sparse; `name` cannot be supplied
  in that case because no single summing op is created.
  """
  with ops_lib.device(device):
    if isinstance(values[0], ops_lib.IndexedSlices):
      if name:
        raise ValueError('The name {} is not expected to be given to '
                         'IndexedSlices {}'.format(name, values))

      values_concat = array_ops.concat([v.values for v in values], axis=0)
      indices_concat = array_ops.concat([v.indices for v in values], axis=0)
      return ops_lib.IndexedSlices(values_concat, indices_concat,
                                   values[0].dense_shape)
    else:
      return math_ops.add_n(values, name=name)
def _train_spec(tower_specs,
                train_op,
                aggregation_device,
                aggregated_loss_name='loss'):
  """Populate replicated EstimatorSpec for `GraphKeys.TRAIN`."""
  # The first tower's spec serves as the template; only mode, train_op and
  # the aggregated loss are replaced.
  estimator_spec = _asdict(tower_specs[0])
  estimator_spec['mode'] = model_fn_lib.ModeKeys.TRAIN
  estimator_spec['train_op'] = train_op
  estimator_spec['loss'] = _compute_sum_on_device(
      [spec.loss for spec in tower_specs], aggregation_device,
      aggregated_loss_name)
  return model_fn_lib.EstimatorSpec(**estimator_spec)
def _eval_spec(tower_specs, aggregation_device, aggregated_loss_name='loss'):
  """Populate replicated EstimatorSpec for `GraphKeys.EVAL`."""
  estimator_spec = _asdict(tower_specs[0])
  estimator_spec['mode'] = model_fn_lib.ModeKeys.EVAL
  estimator_spec['loss'] = _compute_sum_on_device(
      [spec.loss for spec in tower_specs], aggregation_device,
      aggregated_loss_name)

  update_ops = []
  for tower_spec in tower_specs:
    for name, (_, update_op) in six.iteritems(tower_spec.eval_metric_ops):
      update_ops.append(update_op)

  # Reduce metric variables into the first tower only after every tower's
  # update op has run.
  with ops_lib.control_dependencies(update_ops):
    reduced_update_op = _reduce_metric_variables(len(tower_specs))

  # Expose the first tower's metric tensors, all keyed to the one reduction.
  eval_metric_ops = {}
  for name, (metric_tensor, _) in six.iteritems(tower_specs[0].eval_metric_ops):
    eval_metric_ops[name] = (metric_tensor, reduced_update_op)
  estimator_spec['eval_metric_ops'] = eval_metric_ops
  return model_fn_lib.EstimatorSpec(**estimator_spec)
def _reduce_metric_variables(number_of_towers):
  """Aggregate local variables used in metrics into the first tower.

  Returns a single op that adds every replica's metric-variable values into
  the first tower's variables and zeroes out the replicas, making the
  reduction idempotent. A no-op is returned for a single tower.

  Raises:
    ValueError: If the metric-variable count is not divisible by
      `number_of_towers` (towers produced differing eval_metric_ops).
  """
  if number_of_towers == 1:
    return control_flow_ops.no_op(name='no_eval_metric_reduction')

  metric_variables = ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)
  variables_per_tower = len(metric_variables) // number_of_towers

  if len(metric_variables) % number_of_towers != 0:
    raise ValueError(
        'Different `EstimatorSpec.eval_metric_ops` across `model_fn()` calls.'
        ' Expected {} local variables, but got {} instead.'.format(
            variables_per_tower * number_of_towers, len(metric_variables)))

  # `metric_variables` has the size of `variables_per_tower` x
  # number_of_towers. Each tower is produced by calling the same model_fn.
  # First `variables_per_tower` correspond to the first tower. Each such
  # variable has an replica at the `(variables_per_tower * i)` position, where
  # `i` is `[1.. number_of_towers]`. We are going to add values from replicas
  # to each variable of the first tower. We then zero out replica values, so
  # that `_reduce_metric_variables` operation is idempotent. If a metric
  # is then computed based on local variables from the first tower, then the
  # resulting metric is an estimate for all `number_of_towers` towers.
  ops = []
  for i in range(0, variables_per_tower):
    next_replica_id = i + variables_per_tower
    replicas = [
        metric_variables[replica_id]
        for replica_id in range(next_replica_id, len(metric_variables),
                                variables_per_tower)
    ]  # `replicas` doesn't contain the first-tower variable.

    reduce_op = state_ops.assign_add(metric_variables[i],
                                     math_ops.add_n(replicas))

    with ops_lib.control_dependencies([reduce_op]):
      for replica in replicas:
        zeros_for_replica = array_ops.zeros(
            array_ops.shape(replica), dtype=replica.dtype)
        zero_out_replica_op = state_ops.assign(replica, zeros_for_replica)
        ops.append(zero_out_replica_op)

  return control_flow_ops.group(*ops)
def _predict_spec(tower_specs, aggregation_device):
  """Populate replicated EstimatorSpec for `GraphKeys.PREDICT`."""
  estimator_spec = _asdict(tower_specs[0])
  estimator_spec['mode'] = model_fn_lib.ModeKeys.PREDICT

  with ops_lib.device(aggregation_device):
    # Concatenate per-tower prediction dicts along the batch dimension.
    estimator_spec['predictions'] = _concat_tensor_dicts(
        *[tower_spec.predictions for tower_spec in tower_specs])

    export_outputs_dict = _dict_concat(
        *[tower_spec.export_outputs for tower_spec in tower_specs])

    export_outputs = {}
    for name, export_output_list in six.iteritems(export_outputs_dict):
      if isinstance(export_output_list[0], export_output_lib.PredictOutput):
        export_outputs[name] = export_output_lib.PredictOutput(
            outputs=_concat_tensor_dicts(*[
                export_output.outputs for export_output in export_output_list
            ]))
      elif isinstance(export_output_list[0],
                      export_output_lib.RegressionOutput):
        export_outputs[name] = export_output_lib.RegressionOutput(
            value=array_ops.concat(
                [export_output.value for export_output in export_output_list],
                axis=0))
      elif isinstance(export_output_list[0],
                      export_output_lib.ClassificationOutput):
        scores = None
        if export_output_list[0].scores is not None:
          scores = array_ops.concat(
              [export_output.scores for export_output in export_output_list],
              axis=0)

        classes = None
        if export_output_list[0].classes is not None:
          # NOTE(review): classes are combined with `stack` (new axis) while
          # scores use `concat` along axis 0 — confirm this asymmetry is
          # intended and not a typo for `concat`.
          classes = array_ops.stack(
              [export_output.classes for export_output in export_output_list],
              axis=0)

        export_outputs[name] = export_output_lib.ClassificationOutput(
            scores=scores, classes=classes)

  estimator_spec['export_outputs'] = export_outputs
  return model_fn_lib.EstimatorSpec(**estimator_spec)
def _concat_tensor_dicts(*tensor_dicts):
  """Concatenate same-named tensors from several dicts along axis 0."""
  return {
      name: array_ops.concat(tensors, axis=0, name=name)
      for name, tensors in six.iteritems(_dict_concat(*tensor_dicts))
  }
def _dict_concat(*dicts):
list_dict = {}
for d in dicts:
if d is None:
continue
for k, v in six.iteritems(d):
list_dict.setdefault(k, []).append(v)
return list_dict
def _asdict(namedtuple):
"""Returns a namedtuple as a dictionary.
This is required because `_asdict()` in Python 3.x.x is broken in classes
that inherit from `collections.namedtuple`. See
https://bugs.python.org/issue24931 for more details.
Args:
namedtuple: An object that inherits from `collections.namedtuple`.
Returns:
A dictionary version of the tuple.
"""
return {k: getattr(namedtuple, k) for k in namedtuple._fields}
| |
# -*- coding: utf-8 -*-
"""Test for site detection."""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from requests.exceptions import Timeout
from pywikibot.exceptions import ServerError
from pywikibot.site_detect import MWSite
from pywikibot.tools import MediaWikiVersion, PY2
from tests.aspects import unittest, TestCase
# Python 3 has no `basestring`; restore it as a tuple usable in isinstance().
if not PY2:
    basestring = (str,)
class TestWikiSiteDetection(TestCase):

    """Test Case for MediaWiki detection and site object creation.

    Each detection attempt is recorded into one of four buckets
    (passes/errors/failures/skips); tearDown prints a summary report.
    """

    # This list is intentionally shared between all classes
    _urls_tested = set()
    # Whether it allows multiple tests of the same URL
    allow_multiple = False

    def setUp(self):
        """Set up test."""
        self.skips = {}
        self.failures = {}
        self.errors = {}
        self.passes = {}
        self.all = []
        # reset after end of test
        self._previous_multiple = self.allow_multiple
        super(TestWikiSiteDetection, self).setUp()

    def tearDown(self):
        """Tear Down test."""
        def norm(url):
            """Return (category index, url, result) for report sorting."""
            res = None
            typ = -1
            for pos, result in enumerate([self.passes, self.errors,
                                          self.failures, self.skips]):
                if url in result:
                    assert res is None
                    res = result[url]
                    typ = pos
            if res is None:
                typ = len(PREFIXES) - 1
                res = 'Missing'
            # Bug fix: validate the computed category index `typ`, not the
            # leftover loop variable `pos` (always the last enumerate value).
            assert 0 <= typ < len(PREFIXES)
            return typ, url, res

        self.allow_multiple = self._previous_multiple
        super(TestWikiSiteDetection, self).tearDown()
        # Typo fix in the summary message: 'skiped' -> 'skipped'.
        print('Out of %d sites, %d tests passed, %d tests failed, '
              '%d tests skipped and %d tests raised an error'
              % (len(self.all), len(self.passes), len(self.failures),
                 len(self.skips), len(self.errors))
              )
        PREFIXES = ['PASS', 'ERR ', 'FAIL', 'SKIP', 'MISS']
        sorted_all = sorted((norm(url) for url in self.all),
                            key=lambda item: item[0])
        if not sorted_all:
            # Robustness fix: nothing was tested (e.g. the test errored out
            # before the first URL); max() below would raise on an empty
            # sequence and mask the original failure.
            return
        width = max(len(item[1]) for item in sorted_all)
        print('Results:\n' + '\n'.join(
            '{0} {1:{3}} : {2}'.format(PREFIXES[i[0]], i[1], i[2], width)
            for i in sorted_all))

    def _wiki_detection(self, url, result):
        """Perform one load test."""
        self.all += [url]
        if url in self._urls_tested:
            msg = 'Testing URL "{0}" multiple times!'.format(url)
            if self.allow_multiple:
                print(msg)
            else:
                self.errors[url] = msg
                return
        self._urls_tested.add(url)
        try:
            site = MWSite(url)
        except (ServerError, Timeout) as e:
            # Network problems are skips, not detection failures.
            self.skips[url] = e
            return
        except Exception as e:
            print('failure {0} on {1}: {2}'.format(
                url, type(e), e))
            self.errors[url] = e
            return
        try:
            if result is None:
                self.assertIsNone(site)
            else:
                self.assertIsInstance(site, result)
            self.passes[url] = site
        except AssertionError as error:
            self.failures[url] = error

    def assertSite(self, url):
        """Assert a MediaWiki site can be loaded from the url."""
        self._wiki_detection(url, MWSite)

    def assertNoSite(self, url):
        """Assert a url is not a MediaWiki site."""
        self._wiki_detection(url, None)

    def assertAllPass(self):
        """Assert that all urls were detected as a MediaWiki site."""
        self.assertEqual(set(self.passes), set(self.all) - set(self.skips))
        self.assertEqual(self.failures, {})
        self.assertEqual(self.errors, {})

    def assertAllError(self):
        """Assert that all urls were not detected as a MediaWiki site."""
        self.assertEqual(self.passes, {})
        self.assertEqual(self.failures, {})
        self.assertEqual(set(self.errors), set(self.all) - set(self.skips))
class InterWikiMapDetection(TestWikiSiteDetection):

    """Test all urls on the interwiki map."""

    family = 'meta'
    code = 'meta'
    net = True

    allow_multiple = True

    def test_IWM(self):
        """Test the load_site method for MW sites on the IWM list."""
        data = self.get_site().siteinfo['interwikimap']
        for item in data:
            if 'local' not in item:
                url = item['url']
                self.all += [url]
                try:
                    site = MWSite(url)
                except Exception as error:
                    print('failed to load ' + url)
                    self.errors[url] = error
                    continue
                if type(site) is MWSite:
                    # Loaded as a MediaWiki site; its version string must
                    # parse into a MediaWikiVersion to count as a pass.
                    try:
                        version = site.version
                    except Exception as error:
                        print('failed to get version of ' + url)
                        self.errors[url] = error
                    else:
                        try:
                            self.assertIsInstance(version, MediaWikiVersion)
                            self.passes[url] = site
                        except AssertionError as error:
                            print('failed to parse version of ' + url)
                            self.failures[url] = error
class SiteDetectionTestCase(TestWikiSiteDetection):

    """Test detection of a fixed list of MediaWiki and non-MediaWiki urls."""

    net = True

    def test_detect_site(self):
        """Test detection of MediaWiki sites."""
        self.assertSite('http://botwiki.sno.cc/wiki/$1')
        self.assertSite('http://guildwars.wikia.com/wiki/$1')
        self.assertSite('http://www.hrwiki.org/index.php/$1')  # v 1.15
        self.assertSite('http://www.proofwiki.org/wiki/$1')
        self.assertSite(
            'http://www.ck-wissen.de/ckwiki/index.php?title=$1')
        self.assertSite('http://en.citizendium.org/wiki/$1')
        self.assertSite(
            'http://www.lojban.org/tiki/tiki-index.php?page=$1')
        self.assertSite('http://www.wikichristian.org/index.php?title=$1')
        self.assertSite('https://en.wikifur.com/wiki/$1')
        self.assertSite('http://bluwiki.com/go/$1')
        self.assertSite('http://kino.skripov.com/index.php/$1')
        self.assertAllPass()

    def test_wikisophia(self):
        """Test wikisophia.org which has redirect problems."""
        # /index.php?title=$1 reports 404, however a wiki exists there,
        # but the API is also hidden.
        self.assertNoSite('http://wikisophia.org/index.php?title=$1')
        self.assertAllError()

    def test_pre_114_sites(self):
        """Test pre 1.14 sites which should be detected as unsupported."""
        # v1.12
        self.assertNoSite('http://www.livepedia.gr/index.php?title=$1')
        # v1.11
        self.assertNoSite('http://www.wikifon.org/$1')
        self.assertNoSite('http://glossary.reuters.com/index.php?title=$1')
        # v1.11, with no query module
        self.assertNoSite('http://wikitree.org/index.php?title=$1')
        # v1.9
        self.assertNoSite('http://www.wikinvest.com/$1')
        self.assertAllError()

    def test_non_standard_version_sites(self):
        """Test non-standard version string sites."""
        self.assertSite('https://wiki.gentoo.org/wiki/$1')
        self.assertSite('http://wiki.arabeyes.org/$1')
        self.assertSite('http://tfwiki.net/wiki/$1')
        self.assertAllPass()

    def test_detect_failure(self):
        """Test detection failure for MediaWiki sites with an API."""
        # SSL certificate verification fails
        self.assertNoSite('http://hackerspaces.org/wiki/$1')
        self.assertAllError()

    @unittest.expectedFailure
    def test_api_hidden(self):
        """Test MediaWiki sites with a hidden enabled API."""
        # api.php is not available
        self.assertNoSite('http://wiki.animutationportal.com/index.php/$1')
        # HTML looks like it has an API, but redirect rules prevent access
        self.assertNoSite('http://www.EcoReality.org/wiki/$1')
        self.assertAllError()

    def test_api_disabled(self):
        """Test MediaWiki sites without an enabled API."""
        self.assertNoSite('http://wiki.linuxquestions.org/wiki/$1')
        self.assertAllError()

    def test_offline_sites(self):
        """Test offline sites."""
        self.assertNoSite('http://seattlewiki.org/wiki/$1')
        self.assertAllError()

    def test_pre_api_sites(self):
        """Test detection of MediaWiki sites prior to the API."""
        self.assertNoSite('http://www.wikif1.org/$1')
        self.assertNoSite('http://www.thelemapedia.org/index.php/$1')
        self.assertNoSite('http://esperanto.blahus.cz/cxej/vikio/index.php/$1')
        self.assertNoSite('http://www.werelate.org/wiki/$1')
        self.assertNoSite('http://www.otterstedt.de/wiki/index.php/$1')
        self.assertNoSite('http://kb.mozillazine.org/$1')
        self.assertAllError()

    def test_detect_nosite(self):
        """Test detection of non-wiki sites."""
        self.assertNoSite('http://www.imdb.com/name/nm$1/')
        self.assertNoSite('http://www.ecyrd.com/JSPWiki/Wiki.jsp?page=$1')
        self.assertNoSite('http://operawiki.info/$1')
        self.assertNoSite(
            'http://www.tvtropes.org/pmwiki/pmwiki.php/Main/$1')
        self.assertNoSite('http://c2.com/cgi/wiki?$1')
        self.assertNoSite('https://phabricator.wikimedia.org/$1')
        self.assertNoSite(
            'http://www.merriam-webster.com/cgi-bin/dictionary?book=Dictionary&va=$1')
        self.assertNoSite('http://arxiv.org/abs/$1')
        self.assertAllError()

    def test_musicbrainz_doc(self):
        """Test http://musicbrainz.org/doc/ which has a page 'api.php'."""
        # Possible false positive caused by the existence of a page
        # called http://musicbrainz.org/doc/api.php
        self.assertNoSite('http://musicbrainz.org/doc/$1')
        self.assertAllError()
if __name__ == '__main__':
    # unittest.main() raises SystemExit when done; swallow it so the module
    # can be run as a plain script without a traceback.
    try:
        unittest.main()
    except SystemExit:
        pass
| |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import warnings
from functools import reduce
import torch
import pyro
import pyro.poutine as poutine
from pyro.poutine.util import prune_subsample_sites
def _guess_max_plate_nesting(model, args, kwargs):
    """
    Guesses max_plate_nesting by running the model once
    without enumeration. This optimistically assumes static model
    structure.
    """
    # Trace one execution; block() hides these sample statements from any
    # surrounding handlers.
    with poutine.block():
        model_trace = poutine.trace(model).get_trace(*args, **kwargs)
    sites = [site for site in model_trace.nodes.values() if site["type"] == "sample"]

    # Vectorized plate dims are negative (rightmost = -1), so the most
    # negative dim determines the nesting depth.
    dims = [
        frame.dim
        for site in sites
        for frame in site["cond_indep_stack"]
        if frame.vectorized
    ]
    max_plate_nesting = -min(dims) if dims else 0
    return max_plate_nesting
def _predictive_sequential(
    model,
    posterior_samples,
    model_args,
    model_kwargs,
    num_samples,
    return_site_shapes,
    return_trace=False,
):
    """Run ``model`` once per posterior sample (non-vectorized predictive).

    Conditions the model on each of the ``num_samples`` per-draw latent
    dicts in turn, then either returns the list of traces
    (``return_trace=True``) or stacks the values at the keys of
    ``return_site_shapes`` into tensors reshaped to those shapes.
    """
    collected = []
    # One dict of latents per draw, sliced along each sample's leading dim.
    samples = [
        {k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)
    ]
    for i in range(num_samples):
        trace = poutine.trace(poutine.condition(model, samples[i])).get_trace(
            *model_args, **model_kwargs
        )
        if return_trace:
            collected.append(trace)
        else:
            collected.append(
                {site: trace.nodes[site]["value"] for site in return_site_shapes}
            )

    if return_trace:
        return collected
    else:
        return {
            site: torch.stack([s[site] for s in collected]).reshape(shape)
            for site, shape in return_site_shapes.items()
        }
def _predictive(
    model,
    posterior_samples,
    num_samples,
    return_sites=(),
    return_trace=False,
    parallel=False,
    model_args=(),
    model_kwargs=None,
):
    """Draw samples from the posterior predictive distribution of ``model``.

    :param model: callable containing Pyro primitives.
    :param dict posterior_samples: posterior samples keyed by site name, each
        with leading dim ``num_samples``.
    :param int num_samples: number of predictive draws.
    :param return_sites: sites to return; ``()`` means all sites not in
        ``posterior_samples``; ``None`` (used for guides) means all sites.
    :param bool return_trace: return one vectorized trace instead of samples.
    :param bool parallel: vectorize over draws with an outermost plate;
        requires correctly annotated batch dims in the model.
    :param tuple model_args: positional args for the model.
    :param dict model_kwargs: keyword args for the model; defaults to ``{}``.
    """
    # Bug fix: the previous ``model_kwargs={}`` default was a single mutable
    # dict shared across all calls; use None as the sentinel instead.
    model_kwargs = {} if model_kwargs is None else model_kwargs
    model = torch.no_grad()(poutine.mask(model, mask=False))
    max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)
    vectorize = pyro.plate(
        "_num_predictive_samples", num_samples, dim=-max_plate_nesting - 1
    )
    model_trace = prune_subsample_sites(
        poutine.trace(model).get_trace(*model_args, **model_kwargs)
    )
    reshaped_samples = {}

    for name, sample in posterior_samples.items():
        sample_shape = sample.shape[1:]
        # Insert singleton dims so each sample broadcasts against the
        # vectorized plate dimensions.
        sample = sample.reshape(
            (num_samples,)
            + (1,) * (max_plate_nesting - len(sample_shape))
            + sample_shape
        )
        reshaped_samples[name] = sample

    if return_trace:
        trace = poutine.trace(
            poutine.condition(vectorize(model), reshaped_samples)
        ).get_trace(*model_args, **model_kwargs)
        return trace

    return_site_shapes = {}
    for site in model_trace.stochastic_nodes + model_trace.observation_nodes:
        append_ndim = max_plate_nesting - len(model_trace.nodes[site]["fn"].batch_shape)
        site_shape = (
            (num_samples,) + (1,) * append_ndim + model_trace.nodes[site]["value"].shape
        )
        # non-empty return-sites
        if return_sites:
            if site in return_sites:
                return_site_shapes[site] = site_shape
        # special case (for guides): include all sites
        elif return_sites is None:
            return_site_shapes[site] = site_shape
        # default case: return sites = ()
        # include all sites not in posterior samples
        elif site not in posterior_samples:
            return_site_shapes[site] = site_shape

    # handle _RETURN site
    if return_sites is not None and "_RETURN" in return_sites:
        value = model_trace.nodes["_RETURN"]["value"]
        shape = (num_samples,) + value.shape if torch.is_tensor(value) else None
        return_site_shapes["_RETURN"] = shape

    if not parallel:
        return _predictive_sequential(
            model,
            posterior_samples,
            model_args,
            model_kwargs,
            num_samples,
            return_site_shapes,
            return_trace=False,
        )

    trace = poutine.trace(
        poutine.condition(vectorize(model), reshaped_samples)
    ).get_trace(*model_args, **model_kwargs)
    predictions = {}
    for site, shape in return_site_shapes.items():
        value = trace.nodes[site]["value"]
        if site == "_RETURN" and shape is None:
            # Non-tensor model return value: pass it through unchanged.
            predictions[site] = value
            continue
        if value.numel() < reduce((lambda x, y: x * y), shape):
            # Value was broadcast inside the model; expand to the full shape.
            predictions[site] = value.expand(shape)
        else:
            predictions[site] = value.reshape(shape)

    return predictions
class Predictive(torch.nn.Module):
    """
    EXPERIMENTAL class used to construct predictive distribution. The predictive
    distribution is obtained by running the `model` conditioned on latent samples
    from `posterior_samples`. If a `guide` is provided, then posterior samples
    from all the latent sites are also returned.

    .. warning::
        The interface for the :class:`Predictive` class is experimental, and
        might change in the future.

    :param model: Python callable containing Pyro primitives.
    :param dict posterior_samples: dictionary of samples from the posterior.
    :param callable guide: optional guide to get posterior samples of sites not present
        in `posterior_samples`.
    :param int num_samples: number of samples to draw from the predictive distribution.
        This argument has no effect if ``posterior_samples`` is non-empty, in which case,
        the leading dimension size of samples in ``posterior_samples`` is used.
    :param return_sites: sites to return; by default only sample sites not present
        in `posterior_samples` are returned.
    :type return_sites: list, tuple, or set
    :param bool parallel: predict in parallel by wrapping the existing model
        in an outermost `plate` messenger. Note that this requires that the model has
        all batch dims correctly annotated via :class:`~pyro.plate`. Default is `False`.
    """

    def __init__(
        self,
        model,
        posterior_samples=None,
        guide=None,
        num_samples=None,
        return_sites=(),
        parallel=False,
    ):
        super().__init__()
        if posterior_samples is None:
            if num_samples is None:
                raise ValueError(
                    "Either posterior_samples or num_samples must be specified."
                )
            posterior_samples = {}

        # Infer num_samples from the leading dim of the provided samples,
        # warning when it disagrees with the explicit argument.
        for name, sample in posterior_samples.items():
            batch_size = sample.shape[0]
            if num_samples is None:
                num_samples = batch_size
            elif num_samples != batch_size:
                warnings.warn(
                    "Sample's leading dimension size {} is different from the "
                    "provided {} num_samples argument. Defaulting to {}.".format(
                        batch_size, num_samples, batch_size
                    ),
                    UserWarning,
                )
                num_samples = batch_size

        if num_samples is None:
            raise ValueError(
                "No sample sites in posterior samples to infer `num_samples`."
            )

        if guide is not None and posterior_samples:
            raise ValueError(
                "`posterior_samples` cannot be provided with the `guide` argument."
            )

        if return_sites is not None:
            assert isinstance(return_sites, (list, tuple, set))

        self.model = model
        self.posterior_samples = {} if posterior_samples is None else posterior_samples
        self.num_samples = num_samples
        self.guide = guide
        self.return_sites = return_sites
        self.parallel = parallel

    def call(self, *args, **kwargs):
        """
        Method that calls :meth:`forward` and returns parameter values of the
        guide as a `tuple` instead of a `dict`, which is a requirement for
        JIT tracing. Unlike :meth:`forward`, this method can be traced by
        :func:`torch.jit.trace_module`.

        .. warning::
            This method may be removed once PyTorch JIT tracer starts accepting
            `dict` as valid return types. See
            `issue <https://github.com/pytorch/pytorch/issues/27743>`_.
        """
        result = self.forward(*args, **kwargs)
        # Sort by site name so the tuple ordering is deterministic.
        return tuple(v for _, v in sorted(result.items()))

    def forward(self, *args, **kwargs):
        """
        Returns dict of samples from the predictive distribution. By default, only sample sites not
        contained in `posterior_samples` are returned. This can be modified by changing the
        `return_sites` keyword argument of this :class:`Predictive` instance.

        .. note:: This method is used internally by :class:`~torch.nn.Module`.
            Users should instead use :meth:`~torch.nn.Module.__call__` as in
            ``Predictive(model)(*args, **kwargs)``.

        :param args: model arguments.
        :param kwargs: model keyword arguments.
        """
        posterior_samples = self.posterior_samples
        return_sites = self.return_sites
        if self.guide is not None:
            # return all sites by default if a guide is provided.
            return_sites = None if not return_sites else return_sites
            # First draw latent samples from the guide, then condition the
            # model on them below.
            posterior_samples = _predictive(
                self.guide,
                posterior_samples,
                self.num_samples,
                return_sites=None,
                parallel=self.parallel,
                model_args=args,
                model_kwargs=kwargs,
            )
        return _predictive(
            self.model,
            posterior_samples,
            self.num_samples,
            return_sites=return_sites,
            parallel=self.parallel,
            model_args=args,
            model_kwargs=kwargs,
        )

    def get_samples(self, *args, **kwargs):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "The method `.get_samples` has been deprecated in favor of `.forward`.",
            DeprecationWarning,
        )
        return self.forward(*args, **kwargs)

    def get_vectorized_trace(self, *args, **kwargs):
        """
        Returns a single vectorized `trace` from the predictive distribution. Note that this
        requires that the model has all batch dims correctly annotated via :class:`~pyro.plate`.

        :param args: model arguments.
        :param kwargs: model keyword arguments.
        """
        posterior_samples = self.posterior_samples
        if self.guide is not None:
            # Draw latent samples from the guide before tracing the model.
            posterior_samples = _predictive(
                self.guide,
                posterior_samples,
                self.num_samples,
                parallel=self.parallel,
                model_args=args,
                model_kwargs=kwargs,
            )
        return _predictive(
            self.model,
            posterior_samples,
            self.num_samples,
            return_trace=True,
            model_args=args,
            model_kwargs=kwargs,
        )
| |
import os
import re
import subprocess
from django.utils.text import slugify
from django.conf import settings
from django.core.cache import cache
# These options are passed to Fabric as: fab task --abort-on-prompts=True --user=root ...
# NOTE(review): the list mixes underscore ('no_agent') and hyphen
# ('forward-agent') spellings — confirm which form Fabric expects for each.
fabric_special_options = ['no_agent', 'forward-agent', 'config', 'disable-known-hosts', 'keepalive',
                          'password', 'parallel', 'no-pty', 'reject-unknown-hosts', 'skip-bad-hosts', 'timeout',
                          'command-timeout', 'user', 'warn-only', 'pool-size']
def check_output(command, shell=False):
    """Run `command` with subprocess and return its stdout as bytes."""
    if shell:
        # When running through a shell, allow the project to override which
        # shell is used (defaults to /bin/sh).
        executable = getattr(settings, 'SHELL', '/bin/sh')
    else:
        executable = None
    return subprocess.check_output(command, shell=shell, executable=executable)
def check_output_with_ssh_key(command):
    """Run a shell command, loading the configured git SSH key first if any.

    NOTE(review): `command` is interpolated into a shell string without
    escaping — only pass trusted, server-side constructed commands here.
    """
    if getattr(settings, 'GIT_SSH_KEY_LOCATION', None):
        # Wrap in ssh-agent/ssh-add so git commands can use the deploy key.
        return check_output('ssh-agent bash -c "ssh-add {};{}"'.format(settings.GIT_SSH_KEY_LOCATION, command),
                            shell=True)
    else:
        return check_output([command], shell=True)
def update_project_git(project, cache_dir, repo_dir):
    """Clone the project's repo into `repo_dir`, or refresh an existing clone."""
    if not os.path.exists(repo_dir):
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        check_output_with_ssh_key('git clone {} {}'.format(project.repo_url, repo_dir))
    else:
        # Stash any local edits first so the pull cannot fail on a dirty tree.
        check_output_with_ssh_key(
            'cd {0};git stash;git pull'.format(repo_dir)
        )
def setup_virtual_env_if_needed(repo_dir):
    """Create a virtualenv under `<repo_dir>/env` the first time it is needed."""
    env_dir = os.path.join(repo_dir, 'env')
    if not os.path.exists(env_dir):
        os.makedirs(env_dir)
        check_output("virtualenv {}".format(env_dir), shell=True)
def update_project_requirements(project, repo_dir, activate_loc):
    """pip-install the project's fabfile requirements inside its virtualenv."""
    # One requirement per line on the model; joined into a single pip call.
    pip_installs = ' '.join(project.fabfile_requirements.splitlines())
    check_output_with_ssh_key('source {} && cd {};pip install {}'.format(activate_loc, repo_dir, pip_installs))
def get_fabfile_path(project):
    """Return a ``(fabfile_path, activate_script)`` tuple for *project*.

    When the project ships its own fabfile, the repository is cloned or
    refreshed into a local cache, a virtualenv is prepared with the fabfile
    requirements, and the result is memoized in the Django cache.
    Otherwise the global ``settings.FABFILE_PATH`` is returned with no
    activate script.
    """
    if not project.use_repo_fabfile:
        return settings.FABFILE_PATH, None
    cache_key = 'project_{}_fabfile_path'.format(project.pk)
    cached_result = cache.get(cache_key)
    if cached_result:
        return cached_result
    cache_dir = os.path.join(settings.PUBLIC_DIR, '.repo_caches')
    repo_dir = os.path.join(cache_dir, slugify(project.name))
    update_project_git(project, cache_dir, repo_dir)
    setup_virtual_env_if_needed(repo_dir)
    activate_loc = os.path.join(repo_dir, 'env', 'bin', 'activate')
    update_project_requirements(project, repo_dir, activate_loc)
    result = os.path.join(repo_dir, 'fabfile.py'), activate_loc
    cache.set(cache_key, result, settings.FABRIC_TASK_CACHE_TIMEOUT)
    return result
def parse_task_details(name, task_output):
    """Parse the output of ``fab --display=<name>`` into task metadata.

    Expected layout of *task_output*:
        line 0     : header ("Displaying detailed information for task ...")
        lines 2:-2 : the task docstring (or "No docstring provided")
        line -2    : "Arguments: arg1, arg2='default', ..."

    Returns a ``(name, docstring, arguments)`` tuple where *docstring* is
    ``None`` when the fabfile provided none, and *arguments* is a list whose
    entries are either a bare argument name or an ``(argument, default)``
    pair for arguments with a string default.
    """
    lines = task_output.splitlines()
    docstring = '\n'.join([line.strip() for line in lines[2:-2]]).strip()
    arguments_line = lines[-2].strip()
    if docstring == 'No docstring provided':
        docstring = None
    # Strip the leading "Arguments: " prefix (11 characters).
    arguments_line = arguments_line[11:].strip()
    arguments = []
    if arguments_line:
        for arg in arguments_line.split(', '):
            m = re.match(r"^([^=]+)(=(\'?)([^']*)\3)?$", arg)
            if m is None:
                # Unparseable token (e.g. an exotic default); keep it
                # verbatim instead of crashing with AttributeError.
                arguments.append(arg)
            elif m.group(2):  # argument carries a default value
                if m.group(3) == "'":  # default value is a string
                    arguments.append((m.group(1), m.group(4)))
                else:
                    # All fab arguments are passed as strings, so a
                    # non-string default makes no sense here; ignore it.
                    arguments.append(m.group(1))
            else:
                arguments.append(m.group(1))
    return name, docstring, arguments
def get_fabric_tasks(project):
    """
    Generate a list of fabric tasks that are available
    """
    cache_key = 'project_{}_fabfile_tasks'.format(project.pk)
    cached_result = cache.get(cache_key)
    if cached_result:
        return cached_result
    try:
        fabfile_path, activate_loc = get_fabfile_path(project)
        if activate_loc:
            output = check_output(
                'source {};fab --list --list-format=short --fabfile={}'.format(activate_loc, fabfile_path),
                shell=True)
        else:
            output = check_output(
                ['fab', '--list', '--list-format=short',
                 '--fabfile={}'.format(fabfile_path)])
        tasks = []
        for line in output.splitlines():
            name = line.strip()
            if activate_loc:
                details = check_output(
                    'source {};fab --display={} --fabfile={}'.format(activate_loc, name, fabfile_path),
                    shell=True)
            else:
                details = check_output(
                    ['fab', '--display={}'.format(name),
                     '--fabfile={}'.format(fabfile_path)])
            tasks.append(parse_task_details(name, details))
        cache.set(cache_key, tasks, settings.FABRIC_TASK_CACHE_TIMEOUT)
    except Exception:
        # Best effort: any failure (clone, virtualenv, fab invocation,
        # parsing) yields an empty task list rather than an error page.
        tasks = []
    return tasks
def get_task_details(project, task_name):
    """Return the (name, docstring, arguments) tuple for *task_name*,
    or None when the project has no such task."""
    matches = (details for details in get_fabric_tasks(project)
               if details[0] == task_name)
    return next(matches, None)
def clean_key_string(key):
    """Escape *key* for use as the key of a fab ``--set key=value`` pair.

    Double quotes, commas (pair separators) and ``=`` (key/value separator)
    are backslash-escaped.  Raw strings are used for the backslash escapes
    to avoid invalid escape sequences like ``'\,'`` (SyntaxWarning on
    modern Python); the produced values are unchanged.
    """
    key = key.replace('"', '\\"')   # escape double quotes
    key = key.replace(',', r'\,')   # a bare comma would add a new value
    key = key.replace('=', r'\=')   # a bare '=' would set a new key
    return key
def clean_value_string(value):
    """Escape *value* for use as the value of a fab ``--set key=value`` pair.

    Same escaping rules as :func:`clean_key_string`; raw strings avoid the
    invalid ``'\,'`` / ``'\='`` escape sequences of the original code while
    producing identical output.
    """
    value = value.replace('"', '\\"')   # escape double quotes
    value = value.replace(',', r'\,')   # a bare comma would add a new value
    value = value.replace('=', r'\=')   # a bare '=' would set a new key
    return value
def clean_arg_key_string(key):
    """Reduce *key* to characters valid in a python argument name."""
    # This has to be a valid python function argument, so we can get pretty
    # strict here: drop everything outside [0-9a-zA-Z_].
    return re.sub(r'[^0-9a-zA-Z_]', '', key)
def get_key_value_string(key, config):
    """Render one escaped ``key=value`` entry for a fab ``--set`` option."""
    cleaned = clean_key_string(key)
    data_type = config.data_type
    if data_type == config.BOOLEAN_TYPE:
        # A bare key means True to Fabric; "key=" (empty value) means False.
        if config.get_value():
            return cleaned
        return cleaned + '='
    if data_type == config.NUMBER_TYPE:
        return cleaned + '=' + str(config.get_value())
    return '{}={}'.format(cleaned, clean_value_string(config.get_value()))
def update_config_values_from_session(configs, session):
    """Apply user-entered values from the session onto the stage configs.

    Values found in ``session['configuration_values']`` for known config
    keys are applied via ``set_value()`` and removed from the session;
    whatever remains in that session dict is returned as ad-hoc task
    argument values.

    Returns a ``(configs, arg_values)`` tuple.
    """
    configs = configs.copy()
    for key, config in configs.iteritems():  # Python 2 dict iteration
        if session.get('configuration_values', {}).get(key, None) is not None:
            config.set_value(session['configuration_values'][key])
            # Consume the value so it is not also treated as an extra
            # task argument below.
            del session['configuration_values'][key]
    arg_values = session.get('configuration_values', {})
    return configs, arg_values
def build_command(deployment, session, abort_on_prompts=True):
    """Assemble the full ``fab`` shell command for *deployment*.

    Combines task arguments (``task:arg="value"``), ``--set`` configuration
    pairs, special Fabric command-line options, hosts and the fabfile path.
    Returns the command string, prefixed with ``source <activate>;`` when
    the project uses its own virtualenv.
    """
    # Get the dictionary of configurations for this stage
    configs = deployment.stage.get_configurations()
    configs, arg_values = update_config_values_from_session(configs, session)
    task_args = [key for key, config in configs.iteritems() if config.task_argument and config.task_name == deployment.task.name]
    task_configs = [key for key, config in configs.iteritems() if not config.task_argument]
    # Map underscore spellings to the dash spellings fab expects.
    command_to_config = {x.replace('-', '_'): x for x in fabric_special_options}
    # Take the special env variables out
    normal_task_configs = list(set(task_configs) - set(command_to_config.keys()))
    # Special ones get set a different way
    special_task_configs = list(set(task_configs) & set(command_to_config.keys()))
    command = 'fab ' + deployment.task.name
    task_details = get_task_details(deployment.stage.project, deployment.task.name)
    # Merge configured task arguments with the ones declared by the fabfile
    # (task_details[2] entries are either names or (name, default) tuples).
    task_args = list(set(task_args + [x[0] if isinstance(x, tuple) else x for x in task_details[2]]))
    if task_args:
        key_value_strings = []
        for key in task_args:
            if key in configs:
                value = unicode(configs[key].get_value())
            elif key in arg_values:
                value = unicode(arg_values[key])
            else:
                continue
            cleaned_key = clean_arg_key_string(key)
            value = clean_value_string(value)
            key_value_strings.append('{}="{}"'.format(cleaned_key, value))
        if key_value_strings:
            # Fabric task-argument syntax: task:arg1="v1",arg2="v2"
            command += ':'
            command += ','.join(key_value_strings)
    if normal_task_configs:
        command += ' --set '
        command += '"' + ','.join(get_key_value_string(key, configs[key]) for key in normal_task_configs) + '"'
    if special_task_configs:
        for key in special_task_configs:
            command += ' --' + get_key_value_string(command_to_config[key], configs[key])
    if abort_on_prompts:
        command += ' --abort-on-prompts'
    hosts = deployment.stage.hosts.values_list('name', flat=True)
    if hosts:
        command += ' --hosts=' + ','.join(hosts)
    fabfile_path, active_loc = get_fabfile_path(deployment.stage.project)
    command += ' --fabfile={}'.format(fabfile_path)
    if active_loc:
        return 'source {};'.format(active_loc) + ' ' + command
    else:
        return command
| |
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for cinder.db.api.Worker"""
import time
import uuid
from oslo_db import exception as db_exception
import six
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants as fake
class DBAPIWorkerTestCase(test.TestCase, test.ModelsObjectComparatorMixin):
    """Tests for the db.worker_* API: create, get, update, claim, destroy."""
    # Minimal valid field set for worker_create; individual tests override
    # or remove entries as needed.
    worker_fields = {'resource_type': 'Volume',
                     'resource_id': fake.VOLUME_ID,
                     'status': 'creating'}
    def _uuid(self):
        # Fresh UUID so each worker row gets a unique resource_id.
        return six.text_type(uuid.uuid4())
    def setUp(self):
        super(DBAPIWorkerTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
    def test_worker_create_and_get(self):
        """Test basic creation of a worker record."""
        worker = db.worker_create(self.ctxt, **self.worker_fields)
        db_worker = db.worker_get(self.ctxt, id=worker.id)
        self._assertEqualObjects(worker, db_worker)
    def test_worker_create_unique_constrains(self):
        """Test when we use an already existing resource type and id."""
        db.worker_create(self.ctxt, **self.worker_fields)
        self.assertRaises(exception.WorkerExists, db.worker_create,
                          self.ctxt,
                          resource_type=self.worker_fields['resource_type'],
                          resource_id=self.worker_fields['resource_id'],
                          status='not_' + self.worker_fields['status'])
    def test_worker_create_missing_required_field(self):
        """Try creating a worker with a missing required field."""
        for field in self.worker_fields:
            params = self.worker_fields.copy()
            del params[field]
            self.assertRaises(db_exception.DBError, db.worker_create,
                              self.ctxt, **params)
    def test_worker_create_invalid_field(self):
        """Try creating a worker with a non existent db field."""
        self.assertRaises(TypeError, db.worker_create, self.ctxt,
                          myfield='123', **self.worker_fields)
    def test_worker_get_non_existent(self):
        """Check basic non existent worker record get method."""
        db.worker_create(self.ctxt, **self.worker_fields)
        self.assertRaises(exception.WorkerNotFound, db.worker_get,
                          self.ctxt, service_id='1', **self.worker_fields)
    def _create_workers(self, num, read_back=False, **fields):
        # Helper: create *num* workers; with read_back=True re-read them from
        # the DB so DB-generated fields (e.g. updated_at) are populated.
        workers = []
        base_params = self.worker_fields.copy()
        base_params.update(fields)
        for i in range(num):
            params = base_params.copy()
            params['resource_id'] = self._uuid()
            workers.append(db.worker_create(self.ctxt, **params))
        if read_back:
            for i in range(len(workers)):
                workers[i] = db.worker_get(self.ctxt, id=workers[i].id)
        return workers
    def test_worker_get_all(self):
        """Test basic get_all method."""
        self._create_workers(1)
        service = db.service_create(self.ctxt, {})
        workers = self._create_workers(3, service_id=service.id)
        db_workers = db.worker_get_all(self.ctxt, service_id=service.id)
        self._assertEqualListsOfObjects(workers, db_workers)
    def test_worker_get_all_until(self):
        """Test get_all until a specific time."""
        workers = self._create_workers(3, read_back=True)
        timestamp = workers[-1].updated_at
        # Ensure the next batch gets a strictly later updated_at.
        time.sleep(0.1)
        self._create_workers(3)
        db_workers = db.worker_get_all(self.ctxt, until=timestamp)
        self._assertEqualListsOfObjects(workers, db_workers)
    def test_worker_get_all_returns_empty(self):
        """Test that get_all returns an empty list when there's no results."""
        self._create_workers(3, deleted=True)
        db_workers = db.worker_get_all(self.ctxt)
        self.assertListEqual([], db_workers)
    def test_worker_update_not_exists(self):
        """Test worker update when the worker doesn't exist."""
        self.assertRaises(exception.WorkerNotFound, db.worker_update,
                          self.ctxt, 1)
    def test_worker_update(self):
        """Test basic worker update."""
        worker = self._create_workers(1)[0]
        worker = db.worker_get(self.ctxt, id=worker.id)
        res = db.worker_update(self.ctxt, worker.id, service_id=1)
        self.assertEqual(1, res)
        worker.service_id = 1
        db_worker = db.worker_get(self.ctxt, id=worker.id)
        self._assertEqualObjects(worker, db_worker, ['updated_at'])
    def test_worker_update_update_orm(self):
        """Test worker update updating the worker orm object."""
        worker = self._create_workers(1)[0]
        res = db.worker_update(self.ctxt, worker.id, orm_worker=worker,
                               service_id=1)
        self.assertEqual(1, res)
        db_worker = db.worker_get(self.ctxt, id=worker.id)
        self._assertEqualObjects(worker, db_worker, ['updated_at'])
    def test_worker_destroy(self):
        """Test that worker destroy really deletes the DB entry."""
        worker = self._create_workers(1)[0]
        res = db.worker_destroy(self.ctxt, id=worker.id)
        self.assertEqual(1, res)
        db_workers = db.worker_get_all(self.ctxt, read_deleted='yes')
        self.assertListEqual([], db_workers)
    def test_worker_destroy_non_existent(self):
        """Test that worker destroy returns 0 when entry doesn't exist."""
        res = db.worker_destroy(self.ctxt, id=1)
        self.assertEqual(0, res)
    def test_worker_claim(self):
        """Test worker claim of normal DB entry."""
        service_id = 1
        worker = db.worker_create(self.ctxt, resource_type='Volume',
                                  resource_id=fake.VOLUME_ID,
                                  status='deleting')
        res = db.worker_claim_for_cleanup(self.ctxt, service_id, worker)
        self.assertEqual(1, res)
        db_worker = db.worker_get(self.ctxt, id=worker.id)
        self._assertEqualObjects(worker, db_worker, ['updated_at'])
        self.assertEqual(service_id, db_worker.service_id)
        self.assertEqual(worker.service_id, db_worker.service_id)
    def test_worker_claim_fails_status_change(self):
        """Test that claim fails if the work entry has changed its status."""
        worker = db.worker_create(self.ctxt, resource_type='Volume',
                                  resource_id=fake.VOLUME_ID,
                                  status='deleting')
        worker.status = 'creating'
        res = db.worker_claim_for_cleanup(self.ctxt, 1, worker)
        self.assertEqual(0, res)
        db_worker = db.worker_get(self.ctxt, id=worker.id)
        self._assertEqualObjects(worker, db_worker, ['status'])
        self.assertIsNone(db_worker.service_id)
    def test_worker_claim_fails_service_change(self):
        """Test that claim fails on worker service change."""
        failed_service = 1
        working_service = 2
        this_service = 3
        worker = db.worker_create(self.ctxt, resource_type='Volume',
                                  resource_id=fake.VOLUME_ID,
                                  status='deleting',
                                  service_id=working_service)
        worker.service_id = failed_service
        res = db.worker_claim_for_cleanup(self.ctxt, this_service, worker)
        self.assertEqual(0, res)
        db_worker = db.worker_get(self.ctxt, id=worker.id)
        self.assertEqual(working_service, db_worker.service_id)
    def test_worker_claim_same_service(self):
        """Test worker claim of a DB entry that has our service_id."""
        service_id = 1
        worker = db.worker_create(self.ctxt, resource_type='Volume',
                                  resource_id=fake.VOLUME_ID,
                                  status='deleting', service_id=service_id)
        # Read from DB to get updated_at field
        worker = db.worker_get(self.ctxt, id=worker.id)
        claimed_worker = db.worker_get(self.ctxt, id=worker.id)
        res = db.worker_claim_for_cleanup(self.ctxt,
                                          service_id,
                                          claimed_worker)
        self.assertEqual(1, res)
        db_worker = db.worker_get(self.ctxt, id=worker.id)
        self._assertEqualObjects(claimed_worker, db_worker)
        self._assertEqualObjects(worker, db_worker, ['updated_at'])
        self.assertNotEqual(worker.updated_at, db_worker.updated_at)
    def test_worker_claim_fails_this_service_claimed(self):
        """Test claim fails when worker was already claimed by this service."""
        service_id = 1
        worker = db.worker_create(self.ctxt, resource_type='Volume',
                                  resource_id=fake.VOLUME_ID,
                                  status='creating',
                                  service_id=service_id)
        # Read it back to have the updated_at value
        worker = db.worker_get(self.ctxt, id=worker.id)
        claimed_worker = db.worker_get(self.ctxt, id=worker.id)
        time.sleep(0.1)
        # Simulate that this service starts processing this entry
        res = db.worker_claim_for_cleanup(self.ctxt,
                                          service_id,
                                          claimed_worker)
        self.assertEqual(1, res)
        res = db.worker_claim_for_cleanup(self.ctxt, service_id, worker)
        self.assertEqual(0, res)
        db_worker = db.worker_get(self.ctxt, id=worker.id)
        self._assertEqualObjects(claimed_worker, db_worker)
        self._assertEqualObjects(worker, db_worker, ['updated_at'])
        self.assertNotEqual(worker.updated_at, db_worker.updated_at)
| |
'''
Contact managing views
'''
import json
from datetime import date, datetime, timedelta
from django import forms
from django.contrib import messages
from django.contrib.admin import filters
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth import password_validation
from django.core.exceptions import PermissionDenied
from django.core.files.uploadedfile import UploadedFile
from django.db.models import Q
from django.db.models.query import RawQuerySet, sql
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template import loader
from django.urls import reverse
from django.utils import formats, html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.views.generic import (CreateView, FormView, TemplateView,
UpdateView, View)
from django.views.generic.edit import ModelFormMixin
from ngw.core import perms
from ngw.core.contactsearch import parse_filterstring
from ngw.core.models import (FIELD_BIRTHDAY, FIELD_COLUMNS,
FIELD_DEFAULT_GROUP, GROUP_EVERYBODY, GROUP_USER,
GROUP_USER_NGW, LOG_ACTION_ADD, LOG_ACTION_CHANGE,
Config, Contact, ContactField, ContactFieldValue,
ContactGroup, ContactInGroup, Log)
from ngw.core.nav import Navbar
from ngw.core.views.generic import (InGroupAcl, NgwDeleteView, NgwListView,
NgwUserAcl)
from ngw.core.widgets import FlagsField
# Special column identifiers used in the contact list "fields" parameter:
# the plain name column plus per-field / per-group column prefixes.
DISP_NAME = 'name'
DISP_FIELD_PREFIX = 'field_'
DISP_GROUP_PREFIX = 'group_'
# Field type codes -- presumably ContactField type identifiers used
# elsewhere in the app; TODO(review) confirm against ngw.core.models.
FTYPE_TEXT = 'TEXT'
FTYPE_LONGTEXT = 'LONGTEXT'
FTYPE_NUMBER = 'NUMBER'
FTYPE_DATE = 'DATE'
FTYPE_EMAIL = 'EMAIL'
FTYPE_PHONE = 'PHONE'
FTYPE_RIB = 'RIB'
FTYPE_CHOICE = 'CHOICE'
FTYPE_MULTIPLECHOICE = 'MULTIPLECHOICE'
FTYPE_PASSWORD = 'PASSWORD'
#######################################################################
#
# Contact list
#
#######################################################################
class ContactQuerySet(RawQuerySet):
    '''
    Hand-built raw SQL query for the contact list.

    Columns (custom fields, group memberships, busy/birthday summaries) are
    added incrementally through the add_* methods; the final SQL is
    assembled by compile().  Several hacks make this raw queryset usable by
    the admin ChangeList machinery.
    '''
    def __init__(self, *args, **kargs):
        # Start from an empty raw query; the SQL is built lazily.
        super().__init__('', *args, **kargs)
        # Maps result column name -> SQL expression.
        self.qry_fields = {'id': 'contact.id', 'name': 'name'}
        self.qry_from = ['contact']
        self.qry_where = []
        self.qry_orderby = []
        self.offset = None
        self.limit = None
        self.__hack_for_changelist()
    def __hack_for_changelist(self):
        # Make this raw queryset look enough like a regular QuerySet for
        # the admin ChangeList machinery.
        self.query.select_related = True
        # raw_query = self
        # def get_order_by(self, value):
        #     return raw_query.qry_orderby
        # def set_order_by(self, value):
        #     raw_query.qry_orderby = value
        # self.query.order_by = property(get_order_by, set_order_by)
        self.query.order_by = []
    def __repr__(self):
        # Compile first so the underlying RawQuery has real SQL.
        self.compile()
        return super().__repr__()
    def add_field(self, fieldid):
        '''
        Add a field to query.
        The caller is responsible for checking requesting user is authorized to
        query that field.
        '''
        fieldid = str(fieldid)
        self.qry_from.append(
            'LEFT JOIN contact_field_value AS cfv{fid}'
            ' ON (contact.id = cfv{fid}.contact_id'
            ' AND cfv{fid}.contact_field_id = {fid})'
            .format(fid=fieldid))
        self.qry_fields[DISP_FIELD_PREFIX+fieldid] = 'cfv{fid}.value'.format(
            fid=fieldid)
    def add_group(self, group_id):
        '''
        Add a group to query.
        The caller is responsible for checking requesting user is authorized to
        view that group's members.
        '''
        group_flags_key = 'group_{}_flags'.format(group_id)
        if group_flags_key in self.qry_fields:
            # We already have these fields
            return
        # Add column for direct membership / admin
        self.qry_fields[group_flags_key] = 'cig_{}.flags'.format(group_id)
        self.qry_from.append(
            'LEFT JOIN contact_in_group AS cig_{gid}'
            ' ON (contact.id = cig_{gid}.contact_id'
            ' AND cig_{gid}.group_id={gid}'
            ')'
            .format(gid=group_id))
        # Add column for indirect membership
        self.qry_fields['group_{}_inherited_flags'.format(group_id)] = (
            'cig_inherited_{}.flags'.format(group_id))
        self.qry_from.append('''
            LEFT JOIN (
                SELECT contact_id, bit_or(flags) AS flags
                FROM contact_in_group
                WHERE contact_in_group.group_id
                    IN (SELECT self_and_subgroups({gid}))
                    AND contact_in_group.group_id<>{gid}
                GROUP BY contact_id) AS cig_inherited_{gid}
            ON (contact.id = cig_inherited_{gid}.contact_id)
            '''.format(gid=group_id))
        # Add column for inherited admin
        self.qry_fields['group_{}_inherited_aflags'.format(group_id)] = (
            'gmg_inherited_{}.flags'.format(group_id))
        self.qry_from.append('''
            LEFT JOIN (
                SELECT contact_id, bit_or(gmg_perms.flags) AS flags
                FROM contact_in_group
                JOIN (
                    SELECT self_and_subgroups(father_id) AS group_id,
                           bit_or(flags) AS flags
                    FROM group_manage_group
                    WHERE subgroup_id={gid}
                    GROUP BY group_id
                ) AS gmg_perms
                ON contact_in_group.group_id=gmg_perms.group_id
                AND contact_in_group.flags & 1 <> 0
                GROUP BY contact_id
            ) AS gmg_inherited_{gid}
            ON contact.id=gmg_inherited_{gid}.contact_id
            '''.format(gid=group_id))
        self.qry_fields['group_{}_note'.format(group_id)] = (
            'cig_{}.note'.format(group_id))
    def add_messages(self, group_id):
        '''
        Add column with how many messages are there.
        '''
        self.qry_fields['group_{}_msgcount'.format(group_id)] = '''(
            SELECT count(*)
            FROM contact_message
            WHERE contact_message.contact_id = contact.id
            AND group_id = {group_id}
            )'''.format(group_id=group_id)
        self.qry_fields['group_{}_unreadcount'.format(group_id)] = '''(
            SELECT count(*)
            FROM contact_message
            WHERE contact_message.contact_id = contact.id
            AND group_id = {group_id}
            AND is_answer
            AND read_date IS NULL
            )'''.format(group_id=group_id)
    def add_busy(self, group_id=None):
        '''
        Add a "busy" column with a summary of availability of that contact.
        Use the date of the group, if any.
        Use current date otherwise.
        '''
        colname = 'busy'
        if colname in self.qry_fields:
            return  # already there!
        if group_id is not None:
            self.qry_from.append('''
                LEFT JOIN (
                    SELECT
                        contact_id,
                        bit_or(flags) & 3 AS busy
                    FROM v_cig_membership_inherited
                    JOIN contact_group
                        ON v_cig_membership_inherited.group_id=contact_group.id
                        AND contact_group.busy -- Only "busy" group
                    WHERE contact_group.date IS NOT NULL
                    AND daterange(contact_group.date,
                                  contact_group.end_date,
                                  '[]')
                        -- && daterange('2017-08-01', '2017-08-31', '[]')
                        && ( SELECT daterange(date, end_date, '[]')
                             FROM contact_group WHERE id={gid})
                    AND v_cig_membership_inherited.group_id != {gid}
                    GROUP BY contact_id
                ) AS busy_sub
                ON contact.id=busy_sub.contact_id
                '''.format(gid=group_id))
        else:
            self.qry_from.append('''
                LEFT JOIN (
                    SELECT
                        contact_id,
                        bit_or(flags) & 3 AS busy
                    FROM v_cig_membership_inherited
                    JOIN contact_group
                        ON v_cig_membership_inherited.group_id=contact_group.id
                        AND contact_group.busy -- Only "busy" group
                    WHERE contact_group.date IS NOT NULL
                    AND daterange(contact_group.date,
                                  contact_group.end_date,
                                  '[]')
                        @> current_date
                    GROUP BY contact_id
                ) AS busy_sub
                ON contact.id=busy_sub.contact_id
                ''')
        self.qry_fields[colname] = 'COALESCE(busy, 0)'
    def add_birthday(self, cg=None):
        '''
        Add a "birthday" column for that contact.
        Use the date of the group, if any.
        Use current date otherwise.
        '''
        colname = 'birthday'
        if colname in self.qry_fields:
            return  # already there!
        if cg is not None:
            self.qry_from.append(
                '''
                LEFT JOIN contact_field_value AS cfvbirthday
                ON (contact.id = cfvbirthday.contact_id
                    AND cfvbirthday.contact_field_id = {fid}
                    AND daterange('{startdate}'::date,
                                  '{enddate}'::date,
                                  '[]')
                        @> birthday_after_date(value::date,
                                               '{startdate}'::date)
                )
                '''.format(
                    startdate=cg.date,
                    enddate=cg.end_date,
                    fid=FIELD_BIRTHDAY))
        else:
            self.qry_from.append(
                '''
                LEFT JOIN contact_field_value AS cfvbirthday
                ON (contact.id = cfvbirthday.contact_id
                    AND cfvbirthday.contact_field_id = {fid}
                    AND to_char(value::date, 'MM-DD')
                        = to_char(current_date, 'MM-DD')
                )
                '''.format(fid=FIELD_BIRTHDAY))
        self.qry_fields[colname] = 'cfvbirthday.value'
    def filter(self, extrawhere=None, pk__in=None):
        # Append raw WHERE clauses.  NOTE(review): pk__in values are joined
        # into the SQL unescaped -- callers must supply trusted ids.
        if extrawhere is not None:
            self.qry_where.append(extrawhere)
        if pk__in:
            self.qry_where.append('contact.id IN ({})'.format(
                ','.join(pk__in)))
        return self
    def add_params(self, params):
        # Merge extra SQL parameters with any already registered.
        if self.params:
            self.params.update(params)
        else:
            self.params = params
        return self
    def order_by(self, *names):
        # print('qs.order_by', repr(names))
        # 'pk' ordering is ignored: the raw query exposes no such column.
        for name in names:
            if name != 'pk' and name != '-pk':
                self.qry_orderby.append(name)
        return self
    def compile(self):
        # Assemble the final SQL from the accumulated fields/joins/filters
        # and refresh the underlying RawQuery object.
        qry = 'SELECT '
        qry += ', '.join(['{} AS "{}"'.format(v, k)
                          for k, v in self.qry_fields.items()])
        qry += ' FROM ' + ' '.join(self.qry_from)
        if self.qry_where:
            qry += ' WHERE ( ' + ') AND ('.join(self.qry_where) + ' )'
        if self.qry_orderby:
            order = []
            for by in self.qry_orderby:
                if by[0] == '-':
                    order.append(by[1:]+' DESC')
                else:
                    order.append(by)
            qry += ' ORDER BY ' + ', '.join(order)
        if self.offset:
            qry += ' OFFSET {}'.format(self.offset)
        if self.limit:
            qry += ' LIMIT {}'.format(self.limit)
        self.raw_query = qry
        self.query = sql.RawQuery(sql=qry, using=self.db, params=self.params)
        self.__hack_for_changelist()
        # print(repr(self.raw_query), repr(self.params))
    def count(self):
        # Wrap the (unordered, unpaginated) query in a SELECT COUNT(*).
        qry = 'SELECT '
        qry += ', '.join(['{} AS {}'.format(v, k)
                          for k, v in self.qry_fields.items()])
        qry += ' FROM ' + ' '.join(self.qry_from)
        if self.qry_where:
            qry += ' WHERE (' + ') AND ('.join(self.qry_where) + ')'
        countqry = 'SELECT COUNT(*) FROM ('+qry+') AS qry_count'
        for count, in sql.RawQuery(sql=countqry,
                                   using=self.db,
                                   params=self.params):
            return count
    def __iter__(self):
        # Compile lazily at iteration time.
        self.compile()
        for x in RawQuerySet.__iter__(self):
            yield x
    def __getitem__(self, k):
        # A slice sets OFFSET/LIMIT and returns self (still lazy); an
        # integer index fetches a single row.
        if isinstance(k, slice):
            self.offset = k.start or 0
            if k.stop is not None:
                self.limit = k.stop - self.offset
            if k.step:
                raise NotImplementedError
            return self
        # return only one:
        self.offset = k
        self.limit = 1
        for contact in self:
            return contact
    def _clone(self):
        return self  # FIXME ugly hack for ChangeList
def get_default_columns(user):
    '''
    Compute the contact-list columns for *user*.

    Uses the user's own "columns" field value, falling back to the global
    Config entry; entries whose group/field no longer exists are dropped
    (with a printed diagnostic).  Defaults to the name column.
    '''
    chosen = user.get_fieldvalue_by_id(FIELD_COLUMNS)
    if not chosen:
        try:
            chosen = Config.objects.get(pk='columns').text
        except Config.DoesNotExist:
            pass
    columns = []
    for fname in (chosen or '').split(','):
        if fname in ('name', 'busy'):
            columns.append(fname)
            continue
        if fname.startswith(DISP_GROUP_PREFIX):
            try:
                groupid = int(fname[len(DISP_GROUP_PREFIX):])
            except ValueError:
                print('Error in default fields: {} has invalid syntax.'
                      .format(fname))
                continue
            try:
                ContactGroup.objects.get(pk=groupid)
            except ContactGroup.DoesNotExist:
                print('Error in default fields: There is no group #{}.'
                      .format(groupid))
                continue
        elif fname.startswith(DISP_FIELD_PREFIX):
            try:
                fieldid = int(fname[len(DISP_FIELD_PREFIX):])
            except ValueError:
                print('Error in default fields: {} has invalid syntax.'
                      .format(fname))
                continue
            try:
                ContactField.objects.get(pk=fieldid)
            except ContactField.DoesNotExist:
                print('Error in default fields: There is no field #{}.'
                      .format(fieldid))
                continue
        else:
            print('Error in default fields: Invalid syntax in "{}".'
                  .format(fname))
            continue
        columns.append(fname)
    if not columns:
        columns = [DISP_NAME]
    return columns
def get_available_columns(user_id):
    '''
    Return all available columns on contact list, based on user permission.
    Used in column selection.
    '''
    columns = [(DISP_NAME, _('Name')),
               ('busy', _('Busy'))]
    columns.extend(
        (DISP_FIELD_PREFIX + str(cf.id), cf.name)
        for cf in ContactField.objects.with_user_perms(user_id))
    visible_groups = (
        ContactGroup.objects
        .with_user_perms(user_id,
                         wanted_flags=perms.SEE_MEMBERS,
                         add_column=False)
        .order_by('-date', 'name'))
    columns.extend((DISP_GROUP_PREFIX + str(cg.id), str(cg))
                   for cg in visible_groups)
    return columns
class FieldSelectForm(forms.Form):
    '''
    Forms to select fields & groups to display. Only displays:
    - readable field
    - groups whose members can be viewed
    '''
    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        field_widget = FilteredSelectMultiple(_('Fields'), False)
        self.fields['fields'] = forms.MultipleChoiceField(
            required=False,
            widget=field_widget,
            choices=get_available_columns(user.id))
def membership_to_text(contact_with_extra_fields, group_id):
    '''
    Render a contact's membership in a group as text, combining direct
    flags with inherited membership flags and inherited admin flags.
    '''
    direct = getattr(contact_with_extra_fields,
                     'group_{}_flags'.format(group_id)) or 0
    inherited = getattr(contact_with_extra_fields,
                        'group_{}_inherited_flags'.format(group_id)) or 0
    inherited_admin = getattr(contact_with_extra_fields,
                              'group_{}_inherited_aflags'.format(group_id)) or 0
    # Membership comes from plain inheritance, admin rights from the
    # dedicated admin-inheritance column.
    effective_inherited = ((inherited & ~perms.ADMIN_ALL)
                           | (inherited_admin & perms.ADMIN_ALL))
    return perms.int_to_text(direct, effective_inherited)
def membership_to_text_factory(group_id):
    '''Return a one-argument membership renderer bound to *group_id*.'''
    def render(contact_with_extra_fields):
        return membership_to_text(contact_with_extra_fields, group_id)
    return render
def membership_extended_widget(request, contact_with_extra_fields,
                               contact_group):
    '''Render the membership widget (flags, note, message counters) for one
    contact/group cell of the contact list.'''
    gid = contact_group.id
    flags = getattr(contact_with_extra_fields,
                    'group_{}_flags'.format(gid))
    msg_count = getattr(contact_with_extra_fields,
                        'group_{}_msgcount'.format(gid), 0)
    msg_count_unread = getattr(contact_with_extra_fields,
                               'group_{}_unreadcount'.format(gid), 0)
    note = getattr(contact_with_extra_fields,
                   'group_{}_note'.format(gid)) or ''
    context = {
        'cid': contact_with_extra_fields.id,
        'gid': gid,
        'virtual_group': contact_group.virtual,  # TODO: use this
        'membership_str': membership_to_text(contact_with_extra_fields, gid),
        'note': note,
        'membership': perms.int_to_flags(flags or 0),
        'cig_url': (contact_group.get_absolute_url()
                    + 'members/'
                    + str(contact_with_extra_fields.id)),
        'title': _('{contact} in group {group}').format(
            contact=contact_with_extra_fields,
            group=contact_group),
        'msg_count': msg_count,
        'msg_count_unread': msg_count_unread,
    }
    return loader.render_to_string('membership_widget.html', context)
def membership_extended_widget_factory(request, contact_group):
    '''Return a one-argument widget renderer bound to *request* and
    *contact_group*.'''
    def render(contact_with_extra_fields):
        return membership_extended_widget(
            request, contact_with_extra_fields, contact_group)
    return render
def field_widget(contact_field, contact_with_extra_fields):
    """Render one custom-field cell as safe HTML.

    When the contact has a value, format it through the field; otherwise
    fall back to the field's ``default_value_html()`` if the field type
    defines one, else return ''.
    """
    attrib_name = DISP_FIELD_PREFIX + str(contact_field.id)
    raw_value = getattr(contact_with_extra_fields, attrib_name)
    if raw_value:
        html_value = contact_field.format_value_html(raw_value)
        return mark_safe(html_value)
    # Use getattr with a default instead of try/except AttributeError so an
    # AttributeError raised *inside* default_value_html() is not silently
    # swallowed.
    default_html_func = getattr(contact_field, 'default_value_html', None)
    if default_html_func is None:
        return ''
    return mark_safe(default_html_func())
def field_widget_factory(contact_field):
    '''Return a one-argument field renderer bound to *contact_field*.'''
    def render(contact_with_extra_fields):
        return field_widget(contact_field, contact_with_extra_fields)
    return render
class CustomColumnsFilter(filters.ListFilter):
    '''
    This is not really a filter. This actually adds columns to the query.
    '''
    title = ugettext_lazy('Change columns')
    template = 'choose_columns.html'
    def __init__(self, request, params, model, view):
        super().__init__(request, params, model, view)
        # Consume our GET parameters so the admin changelist does not
        # treat them as unknown filter lookups.
        params.pop('fields', None)
        params.pop('savecolumns', None)
    def has_output(self):
        return True  # This is required so that queryset is called
    def choices(self, cl):
        # This is an ugly hack to recover all the non-fields django-filters, to
        # build the select column base return url
        # We do it here because we need the cl.
        return cl.get_query_string({}, ['fields', 'savecolumns']),
    def queryset(self, request, q):
        # No actual filtering: pass the queryset through unchanged.
        return q
    def expected_parameters(self):
        return ['fields']
class BaseContactListView(NgwListView):
'''
Base view for contact list.
That view should NOT be called directly since there is no user check.
'''
template_name = 'contact_list.html'
list_filter = CustomColumnsFilter,
actions = (
'action_csv_export', # See NgwListView
'action_vcard_export',
'action_bcc',
'add_to_group',
)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Rebuilt per request by get_root_queryset().
        self.list_display = []
    def get_root_queryset(self):
        '''
        Build the ContactQuerySet and the matching list_display for the
        columns requested in the query string (falling back to the user's
        default columns).
        '''
        # Make sure self.contactgroup is defined:
        if not hasattr(self, 'contactgroup'):
            self.contactgroup = None
        q = ContactQuerySet(Contact._default_manager.model,
                            using=Contact._default_manager._db)
        current_cg = self.contactgroup
        list_display = []
        request = self.request
        user = request.user
        fields = request.GET.getlist('fields', None)
        if not fields:
            fields = get_default_columns(user)
        strfields = ','.join(fields)
        fields = strfields.split(',')
        if request.GET.get('savecolumns', False):
            # Persist the selection as the user's default columns.
            user.set_fieldvalue(request, FIELD_COLUMNS, strfields)
        self.strfields = strfields
        self.fields = fields
        for prop in self.fields:
            if prop == 'name':
                if current_cg is not None and current_cg.date:
                    q.add_busy(current_cg.id)
                    q.add_birthday(current_cg)
                else:
                    q.add_busy()
                    q.add_birthday()
                list_display.append('name_with_relative_link')
            elif prop.startswith(DISP_GROUP_PREFIX):
                groupid = int(prop[len(DISP_GROUP_PREFIX):])
                if not perms.c_can_see_members_cg(user.id, groupid):
                    # just ignore groups that aren't allowed to be seen
                    continue
                q.add_group(groupid)
                cg = ContactGroup.objects.get(pk=groupid)
                # attribute_name = 'text_'+prop
                # setattr(self, attribute_name,
                #         membership_to_text_factory(groupid))
                # cols.append((cg.name, attribute_name, None))
                attribute_name = 'html_'+prop
                attribute = membership_extended_widget_factory(request, cg)
                attribute.short_description = str(cg)
                setattr(self, attribute_name, attribute)
                list_display.append(attribute_name)
                # cols.append(('group_{}_flags'.format(groupid),
                #              'group_{}_flags'.format(groupid), None))
            elif prop.startswith(DISP_FIELD_PREFIX):
                fieldid = prop[len(DISP_FIELD_PREFIX):]
                cf = ContactField.objects.get(pk=fieldid)
                if not perms.c_can_view_fields_cg(
                        user.id, cf.contact_group_id):
                    continue  # Just ignore fields that can't be seen
                q.add_field(fieldid)
                attribute_name = 'html_'+prop
                attribute = field_widget_factory(cf)
                attribute.short_description = cf.name
                attribute.admin_order_field = prop
                # TODO: Investigate why there are so many warnings:
                # attribute.allow_tags = True
                setattr(self, attribute_name, attribute)
                list_display.append(attribute_name)
            elif prop == 'busy':
                if current_cg is not None:
                    if current_cg.date:
                        q.add_busy(current_cg.id)
                list_display.append('agenda')
            else:
                raise ValueError('Invalid field '+prop)
        if current_cg is not None:
            # Always show the membership status column inside a group view.
            q.add_group(current_cg.id)
            q.add_messages(current_cg.id)
            self.group_status = membership_extended_widget_factory(
                request, current_cg)
            self.group_status.short_description = _('Status')
            list_display.append('group_status')
            # cols.append(('group_{}_flags'.format(current_cg.id),
            #              'group_{}_flags'.format(current_cg.id), None))
            # cols.append(('group_{}_inherited_flags'.format(current_cg.id),
            #              'group_{}_inherited_flags'.format(current_cg.id),
            #              None))
            # cols.append(('group_{}_inherited_aflags'.format(current_cg.id),
            #              'group_{}_inherited_aflags'.format(current_cg.id),
            #              None))
        self.list_display = list_display
        return q
def get_search_results(self, request, queryset, search_term):
'''
Contact list views handle the search in a very special way.
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
'''
self.filter_str = search_term
filter = parse_filterstring(search_term, request.user.id)
self.filter_html = filter.to_html()
return filter.apply_filter_to_query(queryset), False
def get_context_data(self, **kwargs):
context = {}
context['title'] = _('Contact list')
context['objtype'] = Contact
context['nav'] = Navbar(Contact.get_class_navcomponent())
context.update(kwargs)
result = super().get_context_data(**context)
result['fields_form'] = FieldSelectForm(
self.request.user, initial={'fields': self.fields})
result['display'] = self.cl.params.get('display', 'mg') # TODO
result['filter'] = self.filter_str
result['filter_html'] = self.filter_html
result['reset_filter_link'] = self.cl.get_query_string({}, 'q')
return result
def name_with_relative_link(self, contact):
current_cg = self.contactgroup
flags = ''
birthday = getattr(contact, 'birthday', None)
if birthday is not None:
birthday = date(*[int(c) for c in birthday.split('-')])
if current_cg is not None and current_cg.date:
event_length = current_cg.end_date - current_cg.date
bseml = Config.get_birthday_show_event_max_length()
if event_length < timedelta(days=bseml): # interval means +1
# Next aniversary after event start date:
anniversary = date(
current_cg.date.year,
birthday.month,
birthday.day)
if anniversary < current_cg.date:
try:
anniversary = date(
anniversary.year + 1,
anniversary.month,
anniversary.day)
except ValueError: # Febuary 29th
anniversary = date(
anniversary.year + 1,
anniversary.month,
anniversary.day - 1)
age = anniversary.year - birthday.year
# Translators: This is the next birthday strftime(3)
# format, detailled, but without the year
stranniv = anniversary.strftime(_('%A %B %e'))
hint = _('{age} years on {date}').format(
date=stranniv,
age=age)
flags += (' <span class=iconbirthday title="{}"></span>'
.format(html.escape(hint)))
else:
age = date.today().year - birthday.year
hint = _('{age} years today').format(age=age)
flags += ' <span class=iconbirthday title="{}"></span>'.format(
html.escape(hint))
busy = getattr(contact, 'busy', None)
if busy is not None and busy & perms.MEMBER:
hint = _('That contact is busy. Click here for details.')
if current_cg:
excluded_gid = current_cg.id
else:
excluded_gid = GROUP_EVERYBODY
flags += ' <span class=iconbusy title="{}" ' \
'data-contactid="{}" data-groupid={}>' \
'</span>'.format(
html.escape(hint),
contact.id,
excluded_gid)
return html.format_html(
mark_safe('<a href="{id}/"><b>{name}</a></b> {flags}'),
id=contact.id,
name=html.escape(contact.name),
flags=mark_safe(flags),
)
name_with_relative_link.short_description = ugettext_lazy('Name')
name_with_relative_link.admin_order_field = 'name'
def agenda(self, contact):
busy = getattr(contact, 'busy')
if busy & perms.MEMBER:
return _('Busy')
elif busy & perms.INVITED:
return _('Invited')
elif busy == 0:
return _('Available')
else:
return 'Error {}'.format(busy)
agenda.short_description = ugettext_lazy('Agenda')
agenda.admin_order_field = 'busy'
def action_bcc(self, request, queryset):
emails = []
noemails = []
for contact in queryset:
# only the first email of each contact
c_emails = contact.get_fieldvalues_by_type('EMAIL')
if c_emails:
emails.append(c_emails[0])
else:
noemails.append(contact.name)
if emails:
messages.add_message(
request, messages.SUCCESS,
mark_safe('<a href="{}">{}</a>'.format(
'mailto:?bcc=' + ', '.join(emails),
_('List generated. Click here.'))))
if noemails:
messages.add_message(
request, messages.WARNING,
_('The following people do not have an email address: {}')
.format(', '.join(noemails)))
return None
action_bcc.short_description = ugettext_lazy(
"Send email locally (thunderbird or similar)")
def action_vcard_export(self, request, queryset):
result = ''
for contact in queryset:
result += contact.vcard()
return HttpResponse(result, content_type='text/x-vcard')
action_vcard_export.short_description = ugettext_lazy(
"Vcard format export")
def add_to_group(self, request, queryset):
ids = request.POST.getlist('_selected_action')
return HttpResponseRedirect(
'/contacts/add_to_group?ids=' + ','.join(ids))
add_to_group.short_description = ugettext_lazy("Add to another group")
class ContactListView(NgwUserAcl, BaseContactListView):
    """Contact list restricted to contacts visible to the current user."""

    def get_root_queryset(self):
        qs = super().get_root_queryset()
        # Restrict through the visibility view: only contacts the
        # requesting user may see.
        join_clause = (
            'JOIN v_c_can_see_c ON contact.id=v_c_can_see_c.contact_id_2')
        qs.qry_from.append(join_clause)
        qs.filter('v_c_can_see_c.contact_id_1 = {}'.format(
            self.request.user.id))
        return qs
#######################################################################
#
# Add to another group
#
#######################################################################
class GroupAddManyForm(forms.Form):
    """Form to add a batch of contacts to another group.

    The target group choices only contain groups whose members the user
    is allowed to change; the contact list is re-checked against the
    visibility view so ids can't be forged in the POST.
    """
    ids = forms.CharField(widget=forms.widgets.HiddenInput)

    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.user = user
        self.fields['group'] = forms.ChoiceField(
            label=_('Target group'),
            choices=[
                ('', _('Choose a group')),
                (_('Permanent groups'), [
                    (group.id, group.name)
                    for group in ContactGroup
                    .objects
                    .filter(date__isnull=1)
                    .with_user_perms(user.id, perms.CHANGE_MEMBERS)
                    .order_by('name')]),
                (_('Events'), [
                    (group.id, str(group))
                    for group in ContactGroup
                    .objects
                    .filter(date__isnull=0)
                    .filter(perso_unavail=False)
                    .with_user_perms(user.id, perms.CHANGE_MEMBERS)
                    .order_by('-date', 'name')]),
            ],
        )
        self.fields['flags'] = FlagsField(label=ugettext_lazy('Membership'))
        contact_ids = kwargs['initial']['ids'].split(',')
        contacts = Contact.objects.filter(pk__in=contact_ids)
        # Only keep contacts the requesting user is allowed to see:
        contacts = contacts.extra(
            tables=('v_c_can_see_c',),
            where=(
                'v_c_can_see_c.contact_id_1={}'.format(self.user.id),
                'v_c_can_see_c.contact_id_2=contact.id'))
        self.fields['contacts'] = forms.MultipleChoiceField(
            label=_('Contacts'),
            choices=[(contact.id, contact.name) for contact in contacts],
            initial=contact_ids,
            widget=forms.widgets.CheckboxSelectMultiple(
                attrs={'class': 'contactchoices'}))

    def clean(self):
        data = super().clean()
        # 'group'/'flags' may be absent if their own field validation
        # failed; guard before use (the original indexed data['flags']
        # unconditionally, turning a plain form error into a 500).
        flags = data.get('flags')
        if 'group' in data and flags is not None:
            if flags & ~perms.ADMIN_ALL:
                # Plain membership flags requested: the target must be a
                # real (non-virtual) group.
                group = get_object_or_404(
                    ContactGroup, pk=data['group'])
                if group.virtual:
                    self.add_error('group', _(
                        'This is a virtual group. It cannot have members.'))
            if (flags & perms.ADMIN_ALL
                    and not perms.c_operatorof_cg(self.user.id,
                                                  data['group'])):
                self.add_error('group', _(
                    'You need to be operator of the target group to add this'
                    ' kind of membership.'))
        if flags == 0:
            self.add_error('flags', _('You must select at least one mode'))
        return data

    def add_them(self, request):
        """Add the validated contacts to the validated target group."""
        group_id = self.cleaned_data['group']
        target_group = get_object_or_404(ContactGroup, pk=group_id)
        contact_ids = self.cleaned_data['contacts']
        contacts = Contact.objects.filter(pk__in=contact_ids)
        # Check selected contacts are visible
        contacts = contacts.extra(
            tables=('v_c_can_see_c',),
            where=(
                'v_c_can_see_c.contact_id_1={}'.format(self.user.id),
                'v_c_can_see_c.contact_id_2=contact.id'))
        # Translate the flags bitmask into the "+flag" string syntax
        # expected by set_member_n:
        modes = ''
        intvalue = self.cleaned_data['flags']
        for flag, anint in perms.FLAGTOINT.items():
            if anint & intvalue:
                modes += '+' + flag
        target_group.set_member_n(request, contacts, modes)
class GroupAddManyView(NgwUserAcl, FormView):
    """View to add several contacts to another group at once."""
    form_class = GroupAddManyForm
    template_name = 'group_add_contacts_to.html'

    def _querydict(self):
        # 'ids' can arrive either through GET (initial redirect from the
        # admin action) or POST (form submission). The original view
        # duplicated this branch in get_initial and get_context_data.
        if self.request.method == 'POST':
            return self.request.POST
        return self.request.GET

    def get_initial(self):
        return {'ids': self._querydict()['ids']}

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs

    def get_context_data(self, **kwargs):
        ids = [int(id_) for id_ in self._querydict()['ids'].split(',')]
        context = {}
        context['title'] = _('Add {} contact(s) to a group').format(len(ids))
        context['nav'] = Navbar(Contact.get_class_navcomponent())
        context['nav'].add_component(('add_to_group', _('add contacts to')))
        context['json_ids'] = mark_safe(json.dumps(ids))
        context.update(kwargs)
        return super().get_context_data(**context)

    def form_valid(self, form):
        form.add_them(self.request)
        self.gid = form.cleaned_data['group']  # used by get_success_url
        self.success_form = form
        return super().form_valid(form)

    # NOTE: the original form_invalid override only called super() and
    # was removed as redundant.

    def get_success_url(self):
        target_group = get_object_or_404(ContactGroup, pk=self.gid)
        return target_group.get_absolute_url() + 'members/'
class ContactCheckAvailableView(NgwUserAcl, View):
    """Ajax endpoint returning the busy state of a set of contacts.

    Parameters (GET or POST): "ids" is a comma-separated list of contact
    ids, "group" is the group/event id to check availability against.
    """
    def post(self, request, *args, **kwargs):
        # GET is aliased to this method (see below), so pick the right
        # querydict for the actual HTTP verb:
        if self.request.method == 'POST':
            querydict = self.request.POST
        else:
            querydict = self.request.GET
        gid = querydict['group']
        # (The original assigned querydict['ids'] twice; the dead store
        # was removed.)
        ids = querydict['ids'].split(',')
        contacts = ContactQuerySet(Contact._default_manager.model,
                                   using=Contact._default_manager._db)
        contacts = contacts.filter(pk__in=ids)
        cg = ContactGroup.objects.get(pk=gid)
        resp_contacts = []
        if cg.is_event():
            contacts.add_busy(gid)
            for contact in contacts:
                resp_contacts.append({
                    'id': contact.id,
                    'busy': contact.busy or 0,
                })
        else:
            # Permanent groups never make a contact busy.
            for contact in contacts:
                resp_contacts.append({
                    'id': contact.id,
                    'busy': 0,
                })
        jsonresponse = json.dumps({
            'event_busy': cg.busy,
            'contacts': resp_contacts,
        })
        return HttpResponse(jsonresponse, content_type='application/json')
    get = post
#######################################################################
#
# Contact details
#
#######################################################################
class ContactDetailView(InGroupAcl, TemplateView):
    """Read-only page showing all visible field values of a contact."""
    is_group_required = False
    template_name = 'contact_detail.html'

    def check_perm_groupuser(self, group, user):
        cid = int(self.kwargs['cid'])
        if self.contactgroup:
            if not self.contactgroup.userperms & perms.SEE_MEMBERS:
                raise PermissionDenied
            return
        # No group specified: allowed when looking at oneself, when the
        # user may see members of GROUP_EVERYBODY, or when this contact
        # is individually visible to him.
        if cid == user.id:
            return
        if perms.c_can_see_members_cg(user.id, GROUP_EVERYBODY):
            return
        if perms.c_can_see_c(user.id, cid):
            return
        raise PermissionDenied

    def get_context_data(self, **kwargs):
        cid = int(self.kwargs['cid'])
        contact = get_object_or_404(Contact, pk=cid)
        rows = []
        for cf in contact.get_contactfields(self.request.user.id):
            try:
                cfv = ContactFieldValue.objects.get(
                    contact_id=cid, contact_field_id=cf.id)
            except ContactFieldValue.DoesNotExist:
                continue  # ignore blank values
            rows.append((cf.name, mark_safe(cfv.as_html())))
        context = {
            'title': _('Details for {}').format(contact),
            'objtype': Contact,
            'contact': contact,
            'rows': rows,
        }
        cg = self.contactgroup
        if cg:
            context['nav'] = cg.get_smart_navbar()
            context['nav'].add_component(('members', _('members')))
            context['active_submenu'] = 'members'
        else:
            context['nav'] = Navbar(Contact.get_class_navcomponent())
            context['nav'].add_component(contact.get_navcomponent())
        context['group_user_perms'] = (
            ContactGroup.objects
            .get(pk=GROUP_USER)
            .get_contact_perms(self.request.user.id))
        context['group_user_ngw_perms'] = (
            ContactGroup.objects
            .get(pk=GROUP_USER_NGW)
            .get_contact_perms(self.request.user.id))
        context.update(kwargs)
        return super().get_context_data(**context)
#######################################################################
#
# Contact vcard
#
#######################################################################
class ContactVcardView(InGroupAcl, View):
    """Serve a single contact as a vcf (vCard) file."""
    is_group_required = False

    def check_perm_groupuser(self, group, user):
        cid = int(self.kwargs['cid'])
        if self.contactgroup:
            if not self.contactgroup.userperms & perms.SEE_MEMBERS:
                raise PermissionDenied
            return
        # No group specified:
        if cid == user.id:
            return  # The user can see himself
        if perms.c_can_see_members_cg(user.id, GROUP_EVERYBODY):
            return
        raise PermissionDenied

    def get(self, request, *args, **kwargs):
        # TODO: We should also check the specific fields (email, address,
        # phone, ...) are readable by user
        contact = get_object_or_404(Contact, pk=int(self.kwargs['cid']))
        return HttpResponse(contact.vcard(), content_type='text/x-vcard')
#######################################################################
#
# Contact edit /add
#
#######################################################################
class ContactEditForm(forms.ModelForm):
    """Edit/create form for a Contact, with dynamic custom-field widgets.

    The set of editable custom fields depends on which groups the edited
    contact belongs to (when editing) or on the current group (when
    creating), intersected with the requesting user's write permissions.
    """
    class Meta:
        model = Contact
        fields = ['name']

    def __init__(self, *args, **kwargs):
        # Pop our extra kwargs before ModelForm sees them:
        instance = kwargs.get('instance', None)
        contactgroup = kwargs.pop('contactgroup')
        user = kwargs.pop('user')  # contact making the query, not the edited
        super().__init__(*args, **kwargs)
        self.contactgroup = contactgroup
        # The 'name' field lives on GROUP_EVERYBODY; drop it when the
        # user may not write fields of that group.
        if not perms.c_can_write_fields_cg(user.id, GROUP_EVERYBODY):
            del self.fields['name']  # = forms.CharField(label=_('Name'))
        if instance:
            cfields = instance.get_contactfields(user.id, writable=True)
            # Here we have all the writable fields, including the one from
            # other groups that the user can see
        elif contactgroup:
            contactgroupids = [
                g.id for g in contactgroup.get_self_and_supergroups()]
            cfields = (
                ContactField.objects
                .filter(contact_group_id__in=contactgroupids)
                .with_user_perms(user.id, writable=True))
            # Here we have the fields from contact_group and all its super
            # groups, IF user can write to them
        else:  # FIXME (new contact without any contactgroup)
            cfields = []
        # store dbfields
        self.cfields = cfields
        # Build one form field per custom field, keyed by the field id,
        # pre-filled with the stored value or the field's default.
        for cf in cfields:
            f = cf.get_form_fields()
            if f:
                try:
                    cfv = ContactFieldValue.objects.get(contact=instance,
                                                        contact_field=cf)
                    f.initial = cf.db_value_to_formfield_value(cfv.value)
                except ContactFieldValue.DoesNotExist:
                    initial = cf.default
                    if cf.type == FTYPE_DATE and initial == 'today':
                        initial = date.today()
                    f.initial = initial
                self.fields[str(cf.id)] = f

    def save(self, request):
        """Save the contact and its custom field values, logging changes."""
        is_creation = self.instance.pk is None
        contact = super().save()
        data = self.cleaned_data
        # 1/ The contact name
        if is_creation:
            if not perms.c_can_write_fields_cg(
                    request.user.id, GROUP_EVERYBODY):
                # If user can't write name, we have a problem creating a new
                # contact
                raise PermissionDenied
            log = Log(contact_id=request.user.id)
            log.action = LOG_ACTION_ADD
            log.target = 'Contact ' + str(contact.id)
            log.target_repr = 'Contact ' + contact.name
            log.save()
            log = Log(contact_id=request.user.id)
            log.action = LOG_ACTION_CHANGE
            log.target = 'Contact ' + str(contact.id)
            log.target_repr = 'Contact ' + contact.name
            log.property = 'Name'
            log.property_repr = 'Name'
            log.change = 'new value is ' + contact.name
            # NOTE(review): the LOG_ACTION_CHANGE entry built above is
            # never saved, and the next line rebinds `log` using a
            # positional argument (everywhere else Log is built with
            # contact_id=...) and then never uses it — this looks like a
            # bug; confirm intent before changing.
            log = Log(request.user.id)
            # New contacts are immediately made members of the current
            # group:
            cig = ContactInGroup(contact_id=contact.id,
                                 group_id=self.contactgroup.id)
            cig.flags = perms.MEMBER
            cig.save()
            # TODO: Log new cig
            # TODO: Check can add members in super groups
        else:
            if perms.c_can_write_fields_cg(request.user.id, GROUP_EVERYBODY):
                # NOTE(review): super().save() already copied form data
                # onto the instance, so contact.name presumably equals
                # data['name'] here and this rename log may never fire —
                # verify.
                if contact.name != data['name']:
                    log = Log(contact_id=request.user.id)
                    log.action = LOG_ACTION_CHANGE
                    log.target = 'Contact ' + str(contact.id)
                    log.target_repr = 'Contact ' + contact.name
                    log.property = 'Name'
                    log.property_repr = 'Name'
                    log.change = (
                        'change from ' + contact.name + ' to ' + data['name'])
                    log.save()
        # 2/ In ContactFields
        for cf in self.cfields:
            newvalue = data[str(cf.id)]
            if cf.type == FTYPE_PASSWORD and not newvalue:
                continue  # Ignore entries when password is empty (no change)
            # if cf.type == 'FILE' && newvalue == False:
            # TODO: delete the old file
            if isinstance(newvalue, UploadedFile):
                newvalue = cf.save_file(contact.id, newvalue)
            if newvalue is not None:
                newvalue = cf.formfield_value_to_db_value(newvalue)
            contact.set_fieldvalue(request, cf, newvalue)
        return contact
class ContactEditMixin(ModelFormMixin):
    """Shared logic for the contact create and edit views."""
    template_name = 'edit.html'
    form_class = ContactEditForm
    model = Contact
    pk_url_kwarg = 'cid'

    def check_perm_groupuser(self, group, user):
        if group:
            if not group.userperms & perms.SEE_MEMBERS:
                # CHANGE_MEMBERS is for adding new members, removing them
                # SEE_MEMBERS is enough here
                messages.add_message(
                    self.request, messages.ERROR,
                    _('You are not authorized to see members of that'
                      ' group.'))
                raise PermissionDenied
            if group.virtual:
                messages.add_message(
                    self.request, messages.ERROR,
                    _('This is a virtual group. It cannot have members.'))
                raise PermissionDenied
            return
        cid = int(self.kwargs['cid'])  # ok to crash if create & no group
        if cid == user.id:
            return  # The user can change himself
        if not perms.c_can_see_c(user.id, cid):
            raise PermissionDenied

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['user'] = self.request.user
        kwargs['contactgroup'] = self.contactgroup
        return kwargs

    def form_valid(self, form):
        request = self.request
        contact = form.save(request)
        messages.add_message(
            request, messages.SUCCESS,
            _('Contact {} has been saved.').format(contact.name))
        # Creation happens at the current url; edition one level below.
        base_url = '.' if self.pk_url_kwarg not in self.kwargs else '..'
        if request.POST.get('_continue', None):
            return HttpResponseRedirect(
                base_url + '/' + str(contact.id) + '/edit')
        if request.POST.get('_addanother', None):
            return HttpResponseRedirect(base_url + '/add')
        return HttpResponseRedirect(base_url)

    def get_context_data(self, **kwargs):
        if self.object:
            title = _('Editing {}').format(self.object)
            id = self.object.id
        else:
            title = _('Adding a new {}').format(
                Contact.get_class_verbose_name())
            id = None
        context = {'title': title, 'id': id, 'objtype': Contact}
        if self.contactgroup:
            context['nav'] = self.contactgroup.get_smart_navbar()
            context['nav'].add_component(('members', _('members')))
        else:
            context['nav'] = Navbar(Contact.get_class_navcomponent())
        if id:
            context['nav'].add_component(self.object.get_navcomponent())
            context['nav'].add_component(('edit', _('edit')))
        else:
            context['nav'].add_component(('add', _('add')))
        context.update(kwargs)
        return super().get_context_data(**context)
class ContactEditView(InGroupAcl, ContactEditMixin, UpdateView):
    """Edit an existing contact (all logic lives in ContactEditMixin)."""
    pass
class ContactCreateView(InGroupAcl, ContactEditMixin, CreateView):
    """Create a new contact (all logic lives in ContactEditMixin)."""
    pass
#######################################################################
#
# Contact change password
#
#######################################################################
class ContactPasswordForm(forms.ModelForm):
    """Form asking twice for a new password and validating its strength."""
    # TODO: Use admin SetPasswordForm ?
    class Meta:
        model = Contact
        fields = []
    new_password = forms.CharField(widget=forms.PasswordInput())
    confirm_password = forms.CharField(widget=forms.PasswordInput())

    def clean(self):
        password = self.cleaned_data.get('new_password', '')
        confirmation = self.cleaned_data.get('confirm_password', '')
        if password != confirmation:
            raise forms.ValidationError(_('The passwords must match!'))
        # Enforce the configured password validators:
        password_validation.validate_password(password)
        return self.cleaned_data

    def save(self, request):
        self.instance.set_password(
            self.cleaned_data['new_password'], request=request)
        return self.instance
class PasswordView(InGroupAcl, UpdateView):
    """Change a contact's password."""
    is_group_required = False
    template_name = 'password.html'
    form_class = ContactPasswordForm
    model = Contact
    pk_url_kwarg = 'cid'

    def check_perm_groupuser(self, group, user):
        if int(self.kwargs['cid']) == user.id:
            return  # Ok for oneself
        if not perms.c_can_write_fields_cg(user.id, GROUP_USER):
            raise PermissionDenied

    def form_valid(self, form):
        contact = form.save(self.request)
        messages.add_message(
            self.request, messages.SUCCESS,
            _('Password has been changed successfully!'))
        if not self.contactgroup:
            return HttpResponseRedirect(contact.get_absolute_url())
        return HttpResponseRedirect(
            self.contactgroup.get_absolute_url()
            + 'members/'
            + str(contact.id) + '/')

    def get_context_data(self, **kwargs):
        contact = self.object
        context = {
            'title': _('Change password'),
            'contact': contact,
        }
        if self.contactgroup:
            context['nav'] = self.contactgroup.get_smart_navbar()
            context['nav'].add_component(('members', _('members')))
        else:
            context['nav'] = Navbar(Contact.get_class_navcomponent())
            context['nav'].add_component(contact.get_navcomponent())
        context['nav'].add_component(('password', _('password')))
        context.update(kwargs)
        return super().get_context_data(**context)
#######################################################################
#
# Contact change password hook
#
#######################################################################
# from django.views.decorators.csrf import csrf_exempt
# from django.utils.decorators import method_decorator
# class HookPasswordView(View):
# '''
# This view allow a user to change his password through a post.
# That view allow other modules to change the central password.
# '''
# @method_decorator(csrf_exempt)
# def dispatch(self, request, *args, **kwargs):
# username = request.META['REMOTE_USER'] # Apache external auth
# request.user = Contact.objects.get_by_natural_key(username)
# return super().dispatch(request, *args, **kwargs)
#
# def post(self, request):
# # TODO check password strength
# newpassword_plain = request.POST['password']
# request.user.set_password(newpassword_plain, request=request)
# return HttpResponse('OK')
#######################################################################
#
# Contact delete
#
#######################################################################
class ContactDeleteView(InGroupAcl, NgwDeleteView):
    """Delete a contact; restricted to administrators."""
    is_group_required = False
    model = Contact
    pk_url_kwarg = 'cid'

    def check_perm_groupuser(self, group, user):
        if not user.is_admin():
            raise PermissionDenied

    def get_context_data(self, **kwargs):
        context = {}
        if self.contactgroup:
            nav = self.contactgroup.get_smart_navbar()
            nav.add_component(('members', _('members')))
            nav.add_component(('delete', _('delete')))
            context['nav'] = nav
        context.update(kwargs)
        return super().get_context_data(**context)
#######################################################################
#
# Add contact filter
#
#######################################################################
class FilterAddView(NgwUserAcl, View):
    """Append the current filter string to a contact's saved filters."""

    def check_perm_user(self, user):
        if int(self.kwargs['cid']) == user.id:
            return  # Ok for oneself
        if not perms.c_can_write_fields_cg(user.id, GROUP_USER_NGW):
            raise PermissionDenied

    def get(self, request, cid):
        cid = int(cid)
        contact = get_object_or_404(Contact, pk=cid)
        filter_list = contact.get_saved_filters()
        filter_list.append({
            'name': _('No name'),
            'filter_string': request.GET['filterstr'],
        })
        contact.set_saved_filters(request, filter_list)
        messages.add_message(request, messages.SUCCESS,
                             _('Filter has been added successfully!'))
        # Jump straight to the rename form of the newly added filter:
        return HttpResponseRedirect(
            reverse('filter_edit', args=(cid, len(filter_list)-1)))
#######################################################################
#
# List contact filters
#
#######################################################################
class FilterListView(InGroupAcl, TemplateView):
    """List the custom filters saved by a user."""
    is_group_required = False
    template_name = 'filter_list.html'

    def check_perm_groupuser(self, group, user):
        if int(self.kwargs['cid']) == user.id:
            return  # Ok for oneself
        if not perms.c_can_view_fields_cg(user.id, GROUP_USER_NGW):
            raise PermissionDenied

    def get_context_data(self, **kwargs):
        cid = int(self.kwargs['cid'])
        contact = get_object_or_404(Contact, pk=cid)
        filter_list = contact.get_saved_filters()
        context = {
            'title': _('User custom filters'),
            'contact': contact,
            'filters': [finfo['name'] for finfo in filter_list],
            'nav': Navbar(Contact.get_class_navcomponent()),
        }
        context['nav'].add_component(contact.get_navcomponent())
        context['nav'].add_component(('filters', _('custom filters')))
        context.update(kwargs)
        return super().get_context_data(**context)
#######################################################################
#
# Rename custom filter
#
#######################################################################
class FilterEditForm(forms.Form):
    """Rename a saved custom filter and toggle its "shared" flag."""
    name = forms.CharField(label=_('Name'), max_length=50)
    shared = forms.BooleanField(
        required=False,
        label=ugettext_lazy('Shared'),
        help_text=ugettext_lazy(
            'Allow other users to use that filter.'))

    def __init__(self, *args, **kwargs):
        user = kwargs.pop('user')
        contact = kwargs.pop('contact')
        fid = int(kwargs.pop('fid'))
        super().__init__(*args, **kwargs)
        self.contact = contact
        self.fid = fid
        self.filter_list = contact.get_saved_filters()
        # A stale/forged index is reported as "not found":
        try:
            filterinfo = self.filter_list[fid]
        except (IndexError, ValueError):
            raise Http404
        self.fields['name'].initial = filterinfo['name']
        self.fields['shared'].initial = filterinfo.get('shared', False)
        filterstr = filterinfo['filter_string']
        # Render a human-readable explanation of the filter, degrading
        # gracefully when the user lacks permissions or a field is gone:
        try:
            self.filter_html = parse_filterstring(filterstr, user.id).to_html()
        except PermissionDenied:
            self.filter_html = _(
                "[Permission was denied to explain that filter. You probably"
                " don't have access to the fields / group names it is using.]"
                "<br>Raw filter={}").format(filterstr)
        except ContactField.DoesNotExist:
            self.filter_html = _(
                "[This filter uses a field that doesn't exist anymore.]")

    def save(self, request):
        """Write back the updated name/shared flag, keeping the filter."""
        updated = {
            'name': self.cleaned_data['name'],
            'shared': self.cleaned_data['shared'],
            'filter_string': self.filter_list[self.fid]['filter_string'],
        }
        self.filter_list[self.fid] = updated
        self.contact.set_saved_filters(request, self.filter_list)
class FilterEditView(NgwUserAcl, FormView):
    """View wrapping FilterEditForm (rename a saved filter)."""
    form_class = FilterEditForm
    template_name = 'filter_form.html'

    def check_perm_user(self, user):
        if int(self.kwargs['cid']) == user.id:
            return  # Ok for oneself
        if not perms.c_can_write_fields_cg(user.id, GROUP_USER_NGW):
            raise PermissionDenied

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs.update(
            user=self.request.user,
            contact=get_object_or_404(
                Contact, pk=int(self.kwargs['cid'])),
            fid=self.kwargs['fid'])
        return kwargs

    def get_context_data(self, **kwargs):
        contact = get_object_or_404(Contact, pk=int(self.kwargs['cid']))
        nav = Navbar(Contact.get_class_navcomponent())
        nav.add_component(contact.get_navcomponent())
        nav.add_component(('filters', _('custom filters')))
        context = {'title': _('User custom filter renaming'), 'nav': nav}
        context.update(kwargs)
        return super().get_context_data(**context)

    def form_valid(self, form):
        form.save(self.request)
        messages.add_message(self.request, messages.SUCCESS,
                             _('Filter has been renamed.'))
        return super().form_valid(form)

    def get_success_url(self):
        return reverse('contact_detail', args=(self.kwargs['cid'],))
#######################################################################
#
# Delete contact filter
#
#######################################################################
class FilterDeleteView(NgwUserAcl, View):
    """Delete one of a contact's saved custom filters."""

    def check_perm_user(self, user):
        if int(self.kwargs['cid']) == user.id:
            return  # Ok for oneself
        if not perms.c_can_write_fields_cg(user.id, GROUP_USER_NGW):
            raise PermissionDenied

    def get(self, request, cid, fid):
        cid = int(cid)
        fid = int(fid)
        contact = get_object_or_404(Contact, pk=cid)
        filter_list = contact.get_saved_filters()
        try:
            del filter_list[fid]
        except IndexError:
            # A stale or forged index used to crash with a 500; report
            # "not found" instead, consistent with FilterEditForm.
            raise Http404
        contact.set_saved_filters(request, filter_list)
        messages.add_message(request, messages.SUCCESS,
                             _('Filter has been deleted.'))
        return HttpResponseRedirect(contact.get_absolute_url())
#######################################################################
#
# Set default group of contact
#
#######################################################################
class DefaultGroupForm(forms.ModelForm):
    """Choose (or create) the default personal group of a contact."""
    class Meta:
        model = Contact
        fields = []

    def __init__(self, *args, **kwargs):
        contact = kwargs.get('instance')
        # FIXME Problem when changing the default group for another user:
        # permissions below are checked against the *edited* contact,
        # not the requesting user.
        user = contact
        super().__init__(*args, **kwargs)
        # Permanent (undated) groups the contact belongs to and may see:
        available_groups = (
            ContactGroup.objects
            .with_user_perms(user.id, wanted_flags=perms.SEE_CG)
            .with_member(contact.id)
            .filter(date__isnull=True))
        # The empty value means "create a new personal group" (see save()).
        choices = (
            [('', _('Create new personnal group'))]
            + [(cg.id, cg.name) for cg in available_groups
               if not cg.date and perms.c_can_see_cg(contact.id, cg.id)])
        default_group = contact.get_fieldvalue_by_id(FIELD_DEFAULT_GROUP)
        self.fields['default_group'] = forms.ChoiceField(
            label=_('Default group'), choices=choices, required=False,
            initial=default_group)

    def save(self, request):
        """Store the chosen group id in the FIELD_DEFAULT_GROUP field.

        When no group was chosen, create a dedicated personal group first
        and make the contact its member and operator.
        """
        default_group = self.cleaned_data['default_group']
        contact = self.instance
        if not default_group:
            cg = ContactGroup(
                name=_('Group of {}').format(contact.name),
                description=_('This is the default group of {}').format(
                    contact.name),
            )
            cg.save()
            cg.check_static_folder_created()
            cig = ContactInGroup(
                contact=contact,
                group_id=cg.id,
                flags=perms.MEMBER | perms.ADMIN_ALL,
            )
            cig.save()
            messages.add_message(
                request, messages.SUCCESS,
                _('Personnal group created.'))
            default_group = str(cg.id)
        contact.set_fieldvalue(request, FIELD_DEFAULT_GROUP, default_group)
        return contact
class DefaultGroupView(NgwUserAcl, UpdateView):
    """Change the default group of a contact."""
    template_name = 'contact_default_group.html'
    form_class = DefaultGroupForm
    model = Contact
    pk_url_kwarg = 'cid'

    def check_perm_user(self, user):
        if int(self.kwargs['cid']) == user.id:
            return  # Ok for oneself
        if not perms.c_can_write_fields_cg(user.id, GROUP_USER_NGW):
            raise PermissionDenied

    def form_valid(self, form):
        contact = form.save(self.request)
        messages.add_message(
            self.request, messages.SUCCESS,
            _('Default group has been changed successfully.'))
        return HttpResponseRedirect(contact.get_absolute_url())

    def get_context_data(self, **kwargs):
        contact = get_object_or_404(Contact, pk=int(self.kwargs['cid']))
        nav = Navbar(Contact.get_class_navcomponent())
        nav.add_component(contact.get_navcomponent())
        nav.add_component(('default_group', _('default group')))
        context = {'title': _('User default group'), 'nav': nav}
        context.update(kwargs)
        return super().get_context_data(**context)
#######################################################################
#
# Contact Calendar
#
#######################################################################
class ContactCalendarView(NgwUserAcl, TemplateView):
    """Show the calendar of one contact's events."""
    template_name = 'calendar.html'

    def check_perm_user(self, user):
        if int(self.kwargs.get('cid', 0)) == user.id:
            return  # Ok for oneself
        if not user.is_admin():
            raise PermissionDenied

    def get_context_data(self, **kwargs):
        cid = int(self.kwargs['cid'])
        contact = get_object_or_404(Contact, pk=cid)
        nav = Navbar(Contact.get_class_navcomponent())
        nav.add_component(contact.get_navcomponent())
        nav.add_component(('calendar', _('calendar')))
        context = {
            'title': _("{name}'s Calendar").format(
                name=contact.name),
            'nav': nav,
            'weekdaystart': formats.get_format('FIRST_DAY_OF_WEEK'),
            'contactid': cid,
        }
        context.update(kwargs)
        return super().get_context_data(**context)
#######################################################################
#
# Contact unavail calendar: json
#
#######################################################################
class ContactUnavailDetailView(NgwUserAcl, View):
    """Json endpoint: "busy" events of a contact over a period.

    The period is given either as dfrom/dto ISO dates, or as a group
    (event) id whose start/end dates are used instead.
    """
    def get(self, request, cid, dfrom=None, dto=None, gid=None):
        contact = Contact.objects.get(pk=cid)
        if gid is not None:
            gid = int(gid)
            # NOTE(review): assert is stripped under "python -O"; this is
            # only a routing sanity check, not input validation.
            assert dfrom is None and dto is None, \
                "dfrom+dto and gid parameters are mutualy exclusive"
            group = ContactGroup.objects.get(pk=gid)
            user = self.request.user
            if not perms.c_can_see_cg(user.id, group.id):
                raise PermissionDenied
            dfrom = group.date
            dto = group.end_date
        else:
            # Explicit dates; default to today when missing.
            if dfrom is not None:
                dfrom = datetime.strptime(dfrom, '%Y-%m-%d')
            if dto is not None:
                dto = datetime.strptime(dto, '%Y-%m-%d')
            if dfrom is None:
                dfrom = date.today()
            if dto is None:
                dto = date.today()
        # Look for "busy" events that contact is member of
        # even those that are secrets
        # Add permissions columns for the requester
        events = (
            ContactGroup.objects
            .with_user_perms(
                request.user.id,
                wanted_flags=None,
                add_column=True)
            .with_member(contact.id)
            .filter(busy=True)
            .filter(
                # start within boundaries:
                Q(date__gte=dfrom, date__lte=dto)
                # or end within boundaries:
                | Q(end_date__gte=dfrom, end_date__lte=dto)
                # or start before and end after (this is a long event):
                | Q(date__lte=dfrom, end_date__gte=dto))
        )
        # Split conflicting events into those the requester may see in
        # detail and an anonymous "there are hidden conflicts" flag.
        visible_events = {}
        invisible_events = False
        for e in events:
            if e.id == gid:
                continue  # Ignore self
            if e.userperms & perms.SEE_MEMBERS:
                visible_events[e.id] = e
            else:
                invisible_events = True
        result = {
            'contact': contact.id,
            'from': dfrom.strftime('%Y-%m-%d'),
            'to': dto.strftime('%Y-%m-%d'),
            'result': loader.render_to_string(
                'contact_unavail_detail.html', {
                    'visible_events': visible_events,
                    'invisible_events': invisible_events,
                }),
        }
        jsonresponse = json.dumps(result)
        return HttpResponse(jsonresponse, content_type='application/json')
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsOperations(object):
    """ExpressRoutePortsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2021_05_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): AutoRest-generated code -- any manual edit here will be
    # lost on regeneration. Comments below only document the generated
    # patterns; the code itself is unchanged.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Wired up by the generated service client; not for direct construction.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _delete_initial(
        self,
        resource_group_name, # type: str
        express_route_port_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request that starts the long-running
        delete; returns nothing (or ``cls(...)``) on 200/202/204, raises
        :class:`~azure.core.exceptions.HttpResponseError` otherwise."""
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        express_route_port_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified ExpressRoutePort resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # The lambda makes the initial call return the raw
            # PipelineResponse so the poller can drive the LRO itself.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                express_route_port_name=express_route_port_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete produces no body: hand back None unless the caller
            # supplied a custom ``cls`` callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        # Delete polls the 'location' header for its final state (compare
        # with begin_create_or_update, which uses 'azure-async-operation').
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        express_route_port_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ExpressRoutePort"
        """Retrieves the requested ExpressRoutePort resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of ExpressRoutePort.
        :type express_route_port_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRoutePort, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2021_05_01.models.ExpressRoutePort
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        express_route_port_name, # type: str
        parameters, # type: "_models.ExpressRoutePort"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ExpressRoutePort"
        """Send the initial PUT request that starts the long-running
        create-or-update; returns the deserialized resource from the
        200/201 response."""
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ExpressRoutePort')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) carry the same body shape; the
        # generator emits one branch per status code regardless.
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        express_route_port_name, # type: str
        parameters, # type: "_models.ExpressRoutePort"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExpressRoutePort"]
        """Creates or updates the specified ExpressRoutePort resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :param parameters: Parameters supplied to the create ExpressRoutePort operation.
        :type parameters: ~azure.mgmt.network.v2021_05_01.models.ExpressRoutePort
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRoutePort or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_05_01.models.ExpressRoutePort]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # The lambda makes the initial call return the raw
            # PipelineResponse so the poller can drive the LRO itself.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                express_route_port_name=express_route_port_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        # PUT polls the Azure-AsyncOperation header for its final state
        # (compare with begin_delete, which uses 'location').
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def update_tags(
        self,
        resource_group_name, # type: str
        express_route_port_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ExpressRoutePort"
        """Update ExpressRoutePort tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :param parameters: Parameters supplied to update ExpressRoutePort resource tags.
        :type parameters: ~azure.mgmt.network.v2021_05_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRoutePort, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2021_05_01.models.ExpressRoutePort
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_tags.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        # Tag updates are a synchronous PATCH (no LRO involved).
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ExpressRoutePortListResult"]
        """List all the ExpressRoutePort resources in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.ExpressRoutePortListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build either the first-page request (templated URL + query
            # params) or a follow-up request to the server-supplied link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Turn one page response into (next-page link, items iterator).
            deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
    def list(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ExpressRoutePortListResult"]
        """List all the ExpressRoutePort resources in the specified subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.ExpressRoutePortListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Same paging pattern as list_by_resource_group, scoped to the
            # whole subscription.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
    def generate_loa(
        self,
        resource_group_name, # type: str
        express_route_port_name, # type: str
        request, # type: "_models.GenerateExpressRoutePortsLOARequest"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.GenerateExpressRoutePortsLOAResult"
        """Generate a letter of authorization for the requested ExpressRoutePort resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of ExpressRoutePort.
        :type express_route_port_name: str
        :param request: Request parameters supplied to generate a letter of authorization.
        :type request: ~azure.mgmt.network.v2021_05_01.models.GenerateExpressRoutePortsLOARequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GenerateExpressRoutePortsLOAResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2021_05_01.models.GenerateExpressRoutePortsLOAResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.GenerateExpressRoutePortsLOAResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.generate_loa.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(request, 'GenerateExpressRoutePortsLOARequest')
        body_content_kwargs['content'] = body_content
        # NOTE(review): generated code rebinds the ``request`` parameter to
        # the outgoing HttpRequest here; the model argument was already
        # serialized into body_content above, so this shadowing is safe.
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('GenerateExpressRoutePortsLOAResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    generate_loa.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRoutePorts/{expressRoutePortName}/generateLoa'} # type: ignore
| |
import unittest
import subprocess
import json
import os
import util
import time
from TestConfig import *
# Resolve the target environment (default 'aiaas') and load its settings
# (hostname, appId, userKey, botName) from the shared TestConfig helper.
config = {}
test_env = os.getenv('test_env', 'aiaas')
env_setup = TestConfig()
config = env_setup.setEnvironment(test_env)
# Absolute path to the pb-cli executable exercised by these tests.
cli = os.path.abspath('./pb-cli/index.js')
class TestPBRemove(unittest.TestCase):
    """Integration tests for the ``pb remove`` CLI command.

    Each success-path test compiles a fresh bot (``setUp``), removes one of
    its files through the CLI (answering 'yes' at the confirmation prompt),
    and verifies the bot's file list afterwards.  The error-path tests run
    non-interactively (``--yes``) and assert that the expected HTTP status
    code appears in the CLI output.
    """

    @classmethod
    def setUpClass(cls):
        # One-time setup shared by every test in this class.
        # (Parameter renamed from the misleading `self` to `cls`.)
        cls.util = util.TestUtil()
        cls.util.announce_test_block('pb remove')
        cls.hostname = config["hostname"]
        # Function-call form of print works under both Python 2 and 3;
        # the original `print self.hostname` statement was Python-2-only.
        print(cls.hostname)

    def setUp(self):
        # Fresh bot per test so one removal cannot affect the next test.
        self.util.create_and_compile()

    def tearDown(self):
        self.util.delete_bot()

    def _remove_file(self, filename):
        """Run ``pb remove`` for *filename*, answering 'yes' at the prompt."""
        proc = subprocess.Popen([
            cli, 'remove',
            '--app_id', config['appId'],
            '--user_key', config['userKey'],
            '--botname', config['botName'],
            '--hostname', self.hostname,
            filename
        ],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        # NOTE(review): under Python 3 `input` must be bytes (b'yes');
        # kept as str because this suite targets Python 2.
        proc.communicate(input='yes')
        # Give the service a moment to propagate the deletion.
        time.sleep(1)

    def _assert_file_removed(self, filename):
        """Assert *filename* exists, remove it, then assert it is gone."""
        self.assertTrue(filename in self.util.get_file_list())
        self._remove_file(filename)
        self.assertFalse(filename in self.util.get_file_list())

    def _assert_remove_fails(self, filename, status,
                             app_id=None, user_key=None, botname=None):
        """Run ``pb remove --yes`` and assert *status* appears in its output.

        Credentials and botname default to the valid configured values;
        pass an override to exercise the corresponding error path.
        """
        proc = subprocess.Popen([
            cli, 'remove',
            '--app_id', app_id if app_id is not None else config['appId'],
            '--user_key', user_key if user_key is not None else config['userKey'],
            '--botname', botname if botname is not None else config['botName'],
            '--hostname', self.hostname,
            filename,
            '--yes'
        ],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        self.assertTrue(status in proc.stdout.read())

    def test_remove_aiml_file(self):
        self.util.it('removes an aiml file from the bot.')
        self._assert_file_removed("test.aiml")

    def test_remove_map_file(self):
        self.util.it('removes a map file from the bot.')
        self._assert_file_removed("test.map")

    def test_remove_set_file(self):
        self.util.it('removes a set file from the bot.')
        self._assert_file_removed("test.set")

    def test_remove_substitution_file(self):
        self.util.it('removes a substitution file from the bot.')
        self._assert_file_removed("test.substitution")

    def test_remove_pdefaults_file(self):
        self.util.it('removes a pdefaults file from the bot.')
        self._assert_file_removed("testbot.pdefaults")

    def test_remove_properties_file(self):
        self.util.it('removes a properties file from the bot.')
        self._assert_file_removed("testbot.properties")

    def test_invalid_botName(self):
        self.util.it('returns 400 if the botname is invalid.')
        self._assert_remove_fails('test.aiml', '400', botname='ABCDEFG')

    def test_bot_not_found(self):
        self.util.it('returns 412 if the bot does not exist.')
        self._assert_remove_fails('test.aiml', '412', botname='12345')

    def test_file_not_found(self):
        self.util.it('returns 412 if the file does not exist.')
        self._assert_remove_fails('fake.aiml', '412')

    def test_invalid_userKey(self):
        self.util.it('returns 401 if the user_key is invalid.')
        self._assert_remove_fails('test.aiml', '401', user_key='12345')

    def test_invalid_appId(self):
        self.util.it('returns 401 if the app_id is invalid.')
        self._assert_remove_fails('test.aiml', '401', app_id='12345')
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the core layer classes for model pruning and its functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
# Graph-collection names under which the masked layers below register their
# pruning-related tensors (see the ops.add_to_collection calls in build()).
MASK_COLLECTION = 'masks'
THRESHOLD_COLLECTION = 'thresholds'
MASKED_WEIGHT_COLLECTION = 'masked_weights'
WEIGHT_COLLECTION = 'kernel'
# The 'weights' part of the name is needed for the quantization library
# to recognize that the kernel should be quantized.
MASKED_WEIGHT_NAME = 'weights/masked_weight'
class _MaskedConv(base.Layer):
    """Abstract nD convolution layer (private, used as implementation base).

    This layer creates a convolution kernel that is convolved
    (actually cross-correlated) with the layer input to produce a tensor of
    outputs. The weight tensor of this layer is masked.
    If `use_bias` is True (and a `bias_initializer` is provided),
    a bias vector is created and added to the outputs. Finally, if
    `activation` is not `None`, it is applied to the outputs as well.

    Arguments:
      rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
      filters: Integer, the dimensionality of the output space (i.e. the number
        of filters in the convolution).
      kernel_size: An integer or tuple/list of n integers, specifying the
        length of the convolution window.
      strides: An integer or tuple/list of n integers,
        specifying the stride length of the convolution.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: One of `"valid"` or `"same"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch, ..., channels)` while `channels_first` corresponds to
        inputs with shape `(batch, channels, ...)`.
      dilation_rate: An integer or tuple/list of n integers, specifying
        the dilation rate to use for dilated convolution.
        Currently, specifying any `dilation_rate` value != 1 is
        incompatible with specifying any `strides` value != 1.
      activation: Activation function. Set it to None to maintain a
        linear activation.
      use_bias: Boolean, whether the layer uses a bias.
      kernel_initializer: An initializer for the convolution kernel.
      bias_initializer: An initializer for the bias vector. If None, no bias will
        be applied.
      kernel_regularizer: Optional regularizer for the convolution kernel.
      bias_regularizer: Optional regularizer for the bias vector.
      activity_regularizer: Regularizer function for the output.
      trainable: Boolean, if `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: A string, the name of the layer.
    """

    def __init__(self,
                 rank,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=init_ops.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        """Store the (normalized) hyper-parameters; variables are made in build()."""
        super(_MaskedConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=activity_regularizer,
            **kwargs)
        self.rank = rank
        self.filters = filters
        # utils.normalize_* expand scalars to per-dimension tuples and validate
        # the padding / data_format strings.
        self.kernel_size = utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = utils.normalize_tuple(strides, rank, 'strides')
        self.padding = utils.normalize_padding(padding)
        self.data_format = utils.normalize_data_format(data_format)
        self.dilation_rate = utils.normalize_tuple(dilation_rate, rank,
                                                   'dilation_rate')
        self.activation = activation
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        # Inputs have rank + 2 dimensions in total (batch and channel included).
        self.input_spec = base.InputSpec(ndim=self.rank + 2)

    def build(self, input_shape):
        """Create the mask, kernel, threshold (and bias) variables.

        The mask and threshold are non-trainable; they and the masked kernel
        are registered in the module-level graph collections so external
        pruning code can find them.
        """
        input_shape = tensor_shape.TensorShape(input_shape)
        channel_axis = 1 if self.data_format == 'channels_first' else -1
        # TF1-style Dimension access: `.value` is None when the dim is unknown.
        if input_shape[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis].value
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        self.mask = self.add_variable(
            name='mask',
            shape=kernel_shape,
            initializer=init_ops.ones_initializer(),
            trainable=False,
            dtype=self.dtype)
        self.kernel = self.add_variable(
            name='kernel',
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            trainable=True,
            dtype=self.dtype)
        self.threshold = self.add_variable(
            name='threshold',
            shape=[],
            initializer=init_ops.zeros_initializer(),
            trainable=False,
            dtype=self.dtype)
        # Add masked_weights in the weights namescope so as to make it easier
        # for the quantization library to add quant ops.
        self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
                                               MASKED_WEIGHT_NAME)
        ops.add_to_collection(MASK_COLLECTION, self.mask)
        ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
        ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
        ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
        if self.use_bias:
            self.bias = self.add_variable(
                name='bias',
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                trainable=True,
                dtype=self.dtype)
        else:
            self.bias = None
        # Now that the channel count is known, pin it in the input spec.
        self.input_spec = base.InputSpec(
            ndim=self.rank + 2, axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        """Convolve `inputs` with the masked kernel, then add bias/activation."""
        outputs = nn.convolution(
            input=inputs,
            filter=self.masked_kernel,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=utils.convert_data_format(self.data_format, self.rank + 2))
        if self.bias is not None:
            if self.data_format == 'channels_first':
                if self.rank == 1:
                    # nn.bias_add does not accept a 1D input tensor.
                    bias = array_ops.reshape(self.bias, (1, self.filters, 1))
                    outputs += bias
                if self.rank == 2:
                    outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
                if self.rank == 3:
                    # As of Mar 2017, direct addition is significantly slower than
                    # bias_add when computing gradients. To use bias_add, we collapse Z
                    # and Y into a single dimension to obtain a 4D input tensor.
                    outputs_shape = outputs.shape.as_list()
                    outputs_4d = array_ops.reshape(outputs, [
                        outputs_shape[0], outputs_shape[1],
                        outputs_shape[2] * outputs_shape[3], outputs_shape[4]
                    ])
                    outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
                    outputs = array_ops.reshape(outputs_4d, outputs_shape)
            else:
                outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def _compute_output_shape(self, input_shape):
        """Compute the output TensorShape for a given input shape."""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        if self.data_format == 'channels_last':
            # (batch, spatial..., channels): spatial dims are input_shape[1:-1].
            space = input_shape[1:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return tensor_shape.TensorShape([input_shape[0]] + new_space +
                                            [self.filters])
        else:
            # (batch, channels, spatial...): spatial dims are input_shape[2:].
            space = input_shape[2:]
            new_space = []
            for i in range(len(space)):
                new_dim = utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return tensor_shape.TensorShape([input_shape[0], self.filters] +
                                            new_space)
class MaskedConv2D(_MaskedConv):
    """2D convolution layer (e.g. spatial convolution over images).

    This layer creates a convolution kernel that is convolved
    (actually cross-correlated) with the layer input to produce a tensor of
    outputs. If `use_bias` is True (and a `bias_initializer` is provided),
    a bias vector is created and added to the outputs. Finally, if
    `activation` is not `None`, it is applied to the outputs as well.

    Arguments:
      filters: Integer, the dimensionality of the output space (i.e. the number
        of filters in the convolution).
      kernel_size: An integer or tuple/list of 2 integers, specifying the
        height and width of the 2D convolution window.
        Can be a single integer to specify the same value for
        all spatial dimensions.
      strides: An integer or tuple/list of 2 integers,
        specifying the strides of the convolution along the height and width.
        Can be a single integer to specify the same value for
        all spatial dimensions.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: One of `"valid"` or `"same"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch, height, width, channels)` while `channels_first` corresponds to
        inputs with shape `(batch, channels, height, width)`.
      dilation_rate: An integer or tuple/list of 2 integers, specifying
        the dilation rate to use for dilated convolution.
        Can be a single integer to specify the same value for
        all spatial dimensions.
        Currently, specifying any `dilation_rate` value != 1 is
        incompatible with specifying any stride value != 1.
      activation: Activation function. Set it to None to maintain a
        linear activation.
      use_bias: Boolean, whether the layer uses a bias.
      kernel_initializer: An initializer for the convolution kernel.
      bias_initializer: An initializer for the bias vector. If None, no bias will
        be applied.
      kernel_regularizer: Optional regularizer for the convolution kernel.
      bias_regularizer: Optional regularizer for the bias vector.
      activity_regularizer: Regularizer function for the output.
      trainable: Boolean, if `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: A string, the name of the layer.
    """

    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=init_ops.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        # Pure forwarding wrapper: fix rank=2 and delegate everything else to
        # the nD base implementation.
        super(MaskedConv2D, self).__init__(
            rank=2,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            trainable=trainable,
            name=name,
            **kwargs)
class MaskedFullyConnected(base.Layer):
    """Fully-connected layer class with masked weights.

    This layer implements the operation:
    `outputs = activation(inputs.kernel + bias)`
    Where `activation` is the activation function passed as the `activation`
    argument (if not `None`), `kernel` is a weights matrix created by the layer,
    and `bias` is a bias vector created by the layer
    (only if `use_bias` is `True`).
    Note: if the input to the layer has a rank greater than 2, then it is
    flattened prior to the initial matrix multiply by `kernel`.

    Arguments:
      units: Integer or Long, dimensionality of the output space.
      activation: Activation function (callable). Set it to None to maintain a
        linear activation.
      use_bias: Boolean, whether the layer uses a bias.
      kernel_initializer: Initializer function for the weight matrix.
      bias_initializer: Initializer function for the bias.
      kernel_regularizer: Regularizer function for the weight matrix.
      bias_regularizer: Regularizer function for the bias.
      activity_regularizer: Regularizer function for the output.
      trainable: Boolean, if `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: String, the name of the layer. Layers with the same name will
        share weights, but to avoid mistakes we require reuse=True in such cases.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Properties:
      units: Python integer, dimensionality of the output space.
      activation: Activation function (callable).
      use_bias: Boolean, whether the layer uses a bias.
      kernel_initializer: Initializer instance (or name) for the weight matrix.
      bias_initializer: Initializer instance (or name) for the bias.
      kernel_regularizer: Regularizer instance for the weight matrix (callable)
      bias_regularizer: Regularizer instance for the bias (callable).
      activity_regularizer: Regularizer instance for the output (callable)
      kernel: Weight matrix (TensorFlow variable or tensor).
      bias: Bias vector, if applicable (TensorFlow variable or tensor).
    """

    def __init__(self,
                 units,
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=init_ops.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        """Store the hyper-parameters; variables are created in build()."""
        super(MaskedFullyConnected, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=activity_regularizer,
            **kwargs)
        self.units = units
        self.activation = activation
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.input_spec = base.InputSpec(min_ndim=2)

    def build(self, input_shape):
        """Create the kernel, mask, threshold (and bias) variables.

        Mask and threshold are non-trainable; they and the masked kernel are
        registered in the module-level graph collections so external pruning
        code can find them.
        """
        input_shape = tensor_shape.TensorShape(input_shape)
        # TF1-style Dimension access: `.value` is None when the dim is unknown.
        if input_shape[-1].value is None:
            raise ValueError('The last dimension of the inputs to `Dense` '
                             'should be defined. Found `None`.')
        self.input_spec = base.InputSpec(
            min_ndim=2, axes={-1: input_shape[-1].value})
        self.kernel = self.add_variable(
            'kernel',
            shape=[input_shape[-1].value, self.units],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            dtype=self.dtype,
            trainable=True)
        self.mask = self.add_variable(
            name='mask',
            shape=[input_shape[-1].value, self.units],
            initializer=init_ops.ones_initializer(),
            trainable=False,
            dtype=self.dtype)
        self.threshold = self.add_variable(
            name='threshold',
            shape=[],
            initializer=init_ops.zeros_initializer(),
            trainable=False,
            dtype=self.dtype)
        # Add masked_weights in the weights namescope so as to make it easier
        # for the quantization library to add quant ops.
        self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
                                               MASKED_WEIGHT_NAME)
        ops.add_to_collection(MASK_COLLECTION, self.mask)
        ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
        ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
        ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
        if self.use_bias:
            self.bias = self.add_variable(
                'bias',
                shape=[
                    self.units,
                ],
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                dtype=self.dtype,
                trainable=True)
        else:
            self.bias = None
        self.built = True

    def call(self, inputs):
        """Apply the masked matmul, then optional bias and activation."""
        inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
        shape = inputs.get_shape().as_list()
        output_shape = shape[:-1] + [self.units]
        if len(output_shape) > 2:
            # Broadcasting is required for the inputs.
            outputs = standard_ops.tensordot(inputs, self.masked_kernel,
                                             [[len(shape) - 1], [0]])
            # Reshape the output back to the original ndim of the input.
            outputs.set_shape(output_shape)
        else:
            outputs = standard_ops.matmul(inputs, self.masked_kernel)
        if self.use_bias:
            outputs = nn.bias_add(outputs, self.bias)
        if self.activation is not None:
            return self.activation(outputs)  # pylint: disable=not-callable
        return outputs

    def _compute_output_shape(self, input_shape):
        """Compute the output TensorShape (last dim replaced by `units`)."""
        input_shape = tensor_shape.TensorShape(input_shape)
        input_shape = input_shape.with_rank_at_least(2)
        if input_shape[-1].value is None:
            raise ValueError(
                'The innermost dimension of input_shape must be defined, but saw: %s'
                % input_shape)
        return input_shape[:-1].concatenate(self.units)
| |
import pytest
import vaex
import numpy as np
import numpy.ma
from common import small_buffer
# Shared fixture frames for the join tests below. `y` and `m` contain masked
# (missing) entries so the tests exercise missing-value propagation.
# df_a: keys 'A', 'B', 'C' in column `a`.
df_a = vaex.from_arrays(a=np.array(['A', 'B', 'C']),
                        x=np.array([0., 1., 2.]),
                        y=np.ma.array([0., 9., 2.], mask=[False, True, False]),
                        m=np.ma.array([1, 2, 3], mask=[False, True, False])
                        )
# df_b: keys 'A', 'B', 'D' in column `b` — partially overlaps df_a.
df_b = vaex.from_arrays(b=np.array(['A', 'B', 'D']),
                        x=np.array([2., 1., 0.]),
                        y=np.ma.array([9., 1., 2.], mask=[True, False, False]),
                        m=np.ma.array([3, 1, 2], mask=[True, False, False])
                        )
# df_dup: key 'A' appears twice — used for duplication tests.
df_dup = vaex.from_arrays(b=np.array(['A', 'B', 'A']),
                          x=np.array([2., 1., 2.]),
                          y=np.ma.array([9., 1., 9.], mask=[True, False, False]),
                          m=np.ma.array([3, 1, 2], mask=[True, True, False])
                          )
# df_c: only two keys ('B', 'C'), with numeric and boolean payload columns.
df_c = vaex.from_arrays(c=np.array(['B', 'C']),
                        z1=np.array([-1., -2.]),
                        z2=np.array([True, False]),
                        )
# df_d: keys 'B', 'C', 'D' — overlaps df_a on 'B' and 'C' only.
df_d = vaex.from_arrays(a=np.array(['B', 'C', 'D']),
                        x1=np.array(['dog', 'cat', 'mouse']),
                        x2=np.array([3.1, 25, np.nan]),
                        )
# df_e: keys disjoint from df_a — no join match at all.
df_e = vaex.from_arrays(a=np.array(['X', 'Y', 'Z']),
                        x1=np.array(['dog', 'cat', 'mouse']),
                        x2=np.array([3.1, 25, np.nan]),
                        )
def test_no_on():
    """Joining without keys simply appends the columns (no data is copied)."""
    joined = df_a.join(df_b, rsuffix='_r')
    assert joined.columns['b'] is df_b.columns['b']
def test_join_masked():
    """Masked (missing) key values must not match any row."""
    joined = df_a.join(other=df_b, left_on='m', right_on='m', rsuffix='_r')
    assert joined.evaluate('m').tolist() == [1, None, 3]
    assert joined.evaluate('m_r').tolist() == [1, None, None]
    assert joined.columns['m_r'].indices.dtype == np.int8
def test_join_nomatch():
    """A join with no matching key at all yields all-missing right columns."""
    joined = df_a.join(df_e, on=df_a.a, rprefix='r_')
    assert joined.x2.tolist() == [None, None, None]
def test_left_a_b():
    """Left join of df_a onto df_b via the a/b key columns."""
    joined = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    expected = {
        'a': ['A', 'B', 'C'],
        'b': ['A', 'B', None],
        'x': [0, 1, 2],
        'x_r': [2, 1, None],
        'y': [0, None, 2],
        'y_r': [None, 1, None],
    }
    for name, values in expected.items():
        assert joined[name].tolist() == values
def test_left_a_b_as_alias():
    """Join keys may be column aliases rather than plain column names."""
    left = df_a.copy()
    right = df_b.copy()
    left['1'] = left['a']
    right['2'] = right['b']
    joined = left.join(other=right, left_on='1', right_on='2', rsuffix='_r')
    expected = {
        'a': ['A', 'B', 'C'],
        'b': ['A', 'B', None],
        'x': [0, 1, 2],
        'x_r': [2, 1, None],
        'y': [0, None, 2],
        'y_r': [None, 1, None],
    }
    for name, values in expected.items():
        assert joined.evaluate(name).tolist() == values
def test_join_indexed():
    """Joining against an already-joined frame keeps its missing values."""
    first = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    second = df_a.join(first, left_on='a', right_on='b', rsuffix='_r')
    assert second['b'].tolist() == ['A', 'B', None]
def test_left_a_b_filtered():
    """Left join on a filtered frame; clearing the filter re-exposes rows
    unless the frame was extracted first."""
    def check(frame, expected):
        for name, values in expected.items():
            assert frame[name].tolist() == values

    filtered = {
        'a': ['B', 'C'],
        'b': ['B', None],
        'x': [1, 2],
        'x_r': [1, None],
        'y': [None, 2],
        'y_r': [1, None],
    }
    unfiltered = {
        'a': ['A', 'B', 'C'],
        'b': ['A', 'B', None],
        'x': [0, 1, 2],
        'x_r': [2, 1, None],
        'y': [0, None, 2],
        'y_r': [None, 1, None],
    }

    joined = df_a[df_a.x > 0].join(other=df_b, left_on='a', right_on='b',
                                   rsuffix='_r')
    check(joined, filtered)
    # actually, even though the filter is applied, all rows will be matched
    # since the filter can change
    joined.set_selection(None, vaex.dataframe.FILTER_SELECTION_NAME)
    check(joined, unfiltered)

    # if we extract, that shouldn't be the case
    joined = df_a[df_a.x > 0].extract().join(other=df_b, left_on='a',
                                             right_on='b', rsuffix='_r')
    joined.set_selection(None, vaex.dataframe.FILTER_SELECTION_NAME)
    check(joined, filtered)
def test_inner_a_b_filtered():
    """Inner join on a filtered left side keeps only matching, visible rows."""
    joined = df_a[df_a.x > 0].join(other=df_b, left_on='a', right_on='b',
                                   rsuffix='_r', how='inner')
    expected = {
        'a': ['B'],
        'b': ['B'],
        'x': [1],
        'x_r': [1],
        'y': [None],
        'y_r': [1],
    }
    for name, values in expected.items():
        assert joined[name].tolist() == values
def test_left_a_b_filtered_right():
    """Like test_left_a_b_filtered, but the right-hand frame is filtered."""
    # take b without the last row
    right = df_b[df_b.b.str.contains('A|B')]
    joined = df_a.join(right, how='left', on='x', rsuffix='_r')
    expected = {
        # columns of the left frame
        'x': [0, 1, 2],
        'a': ['A', 'B', 'C'],
        'y': [0, None, 2],
        'm': [1, None, 3],
        # columns of the (filtered) right frame
        'b': [None, 'B', 'A'],
        'x_r': [None, 1, 2],
        'y_r': [None, 1, None],
        'm_r': [None, 1, None],
    }
    for name, values in expected.items():
        assert joined[name].tolist() == values
def test_right_x_x():
    """A right join keeps the row order of the right frame."""
    joined = df_a.join(other=df_b, on='x', rsuffix='_r', how='right')
    expected = {
        'a': ['C', 'B', 'A'],
        'b': ['A', 'B', 'D'],
        'x': [2, 1, 0],
        'x_r': [2, 1, 0],
        'y': [2, None, 0],
        'y_r': [None, 1, 2],
    }
    for name, values in expected.items():
        assert joined[name].tolist() == values
    # the join must not leak renamed columns into the original right frame
    assert 'y_r' not in df_b
def test_left_dup():
    """Duplicate right-side keys duplicate rows when explicitly allowed."""
    joined = df_a.join(df_dup, left_on='a', right_on='b', rsuffix='_r',
                       allow_duplication=True)
    assert len(joined) == 4
    # df = df_a.join(df_dup, on='x', rsuffix='_r')
    # df = df_a.join(df_dup, on='m', rsuffix='_r')
def test_left_a_c():
    """Left join pulling numeric and boolean payload columns from df_c."""
    joined = df_a.join(df_c, left_on='a', right_on='c', how='left')
    expected = {
        'a': ['A', 'B', 'C'],
        'x': [0, 1, 2],
        'y': [0., None, 2.],
        'm': [1, None, 3],
        'c': [None, 'B', 'C'],
        'z1': [None, -1., -2.],
        'z2': [None, True, False],
    }
    for name, values in expected.items():
        assert joined[name].tolist() == values
def test_join_a_a_suffix_check():
    """Self-join with suffixes renames every column on both sides."""
    joined = df_a.join(df_a, on='a', lsuffix='_left', rsuffix='_right')
    expected = {'%s_%s' % (name, side)
                for name in 'axym' for side in ('left', 'right')}
    assert set(joined.column_names) == expected
def test_join_a_a_prefix_check():
    """Self-join with prefixes renames every column on both sides."""
    joined = df_a.join(df_a, on='a', lprefix='left_', rprefix='right_')
    expected = {'%s_%s' % (side, name)
                for name in 'axym' for side in ('left', 'right')}
    assert set(joined.column_names) == expected
def test_inner_a_d():
    """Inner join keeps only the keys present in both frames ('B' and 'C')."""
    joined = df_a.join(df_d, on='a', right_on='a', how='inner', rsuffix='_r')
    expected = {
        'a': ['B', 'C'],
        'x': [1., 2.],
        'y': [None, 2.],
        'm': [None, 3.],
        'x1': ['dog', 'cat'],
        'x2': [3.1, 25.],
    }
    for name, values in expected.items():
        assert joined[name].tolist() == values
@pytest.mark.skip(reason='full join not supported yet')
def test_full_a_d():
    """Full outer join: keep every key from either side (unsupported, skipped)."""
    joined = df_a.join(df_d, on='a', right_on='a', how='full')
    expected = {
        'a': ['A', 'B', 'C', 'D'],
        'x': [0., 1., 2., None],
        'y': [0., None, 2., None],
        'm': [1, None, 3, None],
        'x1': [None, 'dog', 'cat', 'mouse'],
        'x2': [None, 3.1, 25., np.nan],
    }
    for name, values in expected.items():
        assert joined[name].tolist() == values
    # the original right frame must be left untouched
    np.testing.assert_array_equal(np.array(df_d.x2.values),
                                  np.array([3.1, 25., np.nan]))
def test_left_virtual_filter():
    """Virtual columns and filters still work on a joined frame."""
    joined = df_a.join(df_d, on='a', how='left', rsuffix='_b')
    joined['r'] = joined.x + joined.x2
    filtered = joined[joined.r > 10]
    assert set(filtered[0]) == {'C', 2.0, 2.0, 3, 'C', 'cat', 25.0, 27.0}
def test_left_on_virtual_col():
    """The left join key may itself be a virtual (mapped) column."""
    df_a['aa'] = df_a.x.map(mapper={0: 'A', 1: 'B', 2: 'C'})
    joined = df_a.join(df_d, left_on='aa', right_on='a', rsuffix='_right')
    expected = {
        'a': ['A', 'B', 'C'],
        'aa': ['A', 'B', 'C'],
        'x': [0, 1, 2],
        'y': [0., None, 2.],
        'm': [1, None, 3],
        'x1': [None, 'dog', 'cat'],
        'x2': [None, 3.1, 25.],
        'a_right': [None, 'B', 'C'],
    }
    for name, values in expected.items():
        assert joined[name].tolist() == values
def test_join_filtered_inner():
    """Inner joins on filtered frames only produce the visible rows."""
    left = df_a[df_a.y > 0]
    joined = left.join(other=df_b, on='x', how='inner', rsuffix='_',
                       allow_duplication=True)
    assert len(joined) == len(left)

    values = np.arange(20)
    frame = vaex.from_arrays(x=values, y=values**2)
    frame = frame[frame.x > 5]
    self_joined = frame.join(frame, on='x', rsuffix='right_', how='inner')
    repr(self_joined)  # trigger issue with selection cache
def test_join_duplicate_column():
    """A shared key column appears only once in the joined frame."""
    left = vaex.from_arrays(index=[1, 2, 3], x=[10, 20, 30])
    right = vaex.from_arrays(index=[1, 2, 3], y=[0.1, 0.2, 0.3])
    joined = left.join(right, on='index')
    assert joined.column_count() == 3
    assert set(joined.column_names) == {'index', 'x', 'y'}
    expected = {
        'index': [1, 2, 3],
        'x': [10, 20, 30],
        'y': [0.1, 0.2, 0.3],
    }
    for name, values in expected.items():
        assert joined[name].tolist() == values
# we join row based and on a column
@pytest.mark.parametrize("on", [None, 'j'])
def test_join_virtual_columns(on):
    """Virtual (and name-mangled '__') columns survive a join."""
    left = vaex.from_scalars(j=444, x=1, y=2)
    left['z'] = left.x + left.y
    left['__h'] = left.z * 2
    right = vaex.from_scalars(j=444, x=2, yy=3)
    right['z'] = right.x + right.yy
    right['__h'] = right.z * 3
    joined = left.join(right, rprefix='r_', rsuffix='_rhs', on=on)
    assert joined.x.values[0] == 1
    assert joined.y.values[0] == 2
    assert joined.z.values[0] == 3
    assert joined.__h.values[0] == 6
    assert joined.r_x_rhs.values[0] == 2
    assert joined.yy.values[0] == 3
    assert joined.r_z_rhs.values[0] == 5
    assert joined.__r_h_rhs.values[0] == 15
def test_join_variables():
    """Expression variables from both frames are preserved by a join."""
    left = vaex.from_scalars(j=444, x=1, y=2)
    left.add_variable('a', 2)
    left.add_variable('b', 3)
    left['z'] = left.x * left['a'] + left.y * left['b']
    right = vaex.from_scalars(j=444, x=2, yy=3)
    right.add_variable('a', 3)
    right.add_variable('b', 4)
    right['z'] = right.x * right['a'] + right.yy * right['b']
    joined = left.join(right, rprefix='r_', rsuffix='_rhs')
    assert joined.x.values[0] == 1
    assert joined.y.values[0] == 2
    assert joined.z.values[0] == 2 + 2*3
    # assert joined.__h.values[0] == 6
    assert joined.r_x_rhs.values[0] == 2
    assert joined.yy.values[0] == 3
    assert joined.r_z_rhs.values[0] == 2*3 + 3*4
def test_with_masked_no_short_circuit():
    """Ensure every lookup is evaluated even when an early key is missing.

    With a tiny evaluation buffer, a left key absent from the right table
    must not short-circuit the remaining lookups, the way ``True or f()``
    never calls ``f``.

    Fix: removed the unused local ``N = 1000`` left over from an earlier
    version of the test.
    """
    df = vaex.from_arrays(i=np.arange(100) % 10)
    df_right = vaex.from_arrays(i=np.arange(9), j=np.arange(9))
    with small_buffer(df, size=1):
        dfj = df.join(other=df_right, on='i')
        assert dfj.columns['j'].masked
        assert dfj[:10].columns['j'].masked
        assert dfj['j'][:10].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, None]
        dfj['j'].tolist()  # make sure we can evaluate the whole column
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import logging
import os
import unittest
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from django.utils import timezone
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
from horizon import exceptions
from horizon.workflows import views
from openstack_auth import policy as policy_backend
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.projects import workflows
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas
# Selenium-only imports: pulled in conditionally so the module can be imported
# in environments where selenium is not installed (WITH_SELENIUM unset).
with_sel = os.environ.get('WITH_SELENIUM', False)
if with_sel:
    from selenium.webdriver import ActionChains  # noqa
    from selenium.webdriver.common import keys
    from socket import timeout as socket_timeout  # noqa
# Resolved URLs for the projects index and a detail page, used by the tests.
INDEX_URL = reverse('horizon:identity:projects:index')
# Form-field name prefixes for per-user / per-group role selections in the
# project-membership workflow steps.
USER_ROLE_PREFIX = workflows.PROJECT_USER_MEMBER_SLUG + "_role_"
GROUP_ROLE_PREFIX = workflows.PROJECT_GROUP_MEMBER_SLUG + "_role_"
PROJECT_DETAIL_URL = reverse('horizon:identity:projects:detail', args=[1])
class TenantsViewTests(test.BaseAdminViewTests):
    # Admin-facing index view: the tenant table should list every tenant, or
    # only the tenants of the domain selected in the session domain context.
    # All keystone calls are stubbed with mox; the recorded call order and
    # arguments are part of the expectations and must not be reordered.

    @test.create_stubs({api.keystone: ('domain_get',
                                       'tenant_list',
                                       'domain_lookup')})
    def test_index(self):
        """The index page lists all tenants when no domain context is set."""
        domain = self.domains.get(id="1")
        api.keystone.domain_get(IsA(http.HttpRequest), '1').AndReturn(domain)
        # domain=None -> no domain filtering for the tenant listing.
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 domain=None,
                                 paginate=True,
                                 marker=None) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.domain_lookup(IgnoreArg()).AndReturn({domain.id:
                                                           domain.name})
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.tenants.list())

    @test.create_stubs({api.keystone: ('tenant_list',
                                       'get_effective_domain_id',
                                       'domain_lookup')})
    def test_index_with_domain_context(self):
        """With a domain context in the session, only that domain's tenants show."""
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)
        domain_tenants = [tenant for tenant in self.tenants.list()
                          if tenant.domain_id == domain.id]
        api.keystone.get_effective_domain_id(IgnoreArg()).AndReturn(domain.id)
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 domain=domain.id,
                                 paginate=True,
                                 marker=None) \
            .AndReturn([domain_tenants, False])
        api.keystone.domain_lookup(IgnoreArg()).AndReturn({domain.id:
                                                           domain.name})
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, domain_tenants)
        # The page header should advertise the active domain context.
        self.assertContains(res, "<em>test_domain:</em>")
class ProjectsViewNonAdminTests(test.TestCase):
    # Non-admin index view: tenant_list must be called with admin=False and
    # scoped to the current user.

    @override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
    @test.create_stubs({api.keystone: ('tenant_list',
                                       'domain_lookup')})
    def test_index(self):
        """A regular (non-admin) user sees their own projects on the index."""
        domain = self.domains.get(id="1")
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 user=self.user.id,
                                 paginate=True,
                                 marker=None,
                                 admin=False) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.domain_lookup(IgnoreArg()).AndReturn({domain.id:
                                                           domain.name})
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.tenants.list())
class CreateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_project_info(self, project):
domain = self._get_default_domain()
project_info = {"name": project.name,
"description": project.description,
"enabled": project.enabled,
"domain": domain.id}
return project_info
def _get_workflow_fields(self, project):
domain = self._get_default_domain()
project_info = {"domain_id": domain.id,
"domain_name": domain.name,
"name": project.name,
"description": project.description,
"enabled": project.enabled}
return project_info
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_workflow_data(self, project, quota):
project_info = self._get_workflow_fields(project)
quota_data = self._get_quota_info(quota)
project_info.update(quota_data)
return project_info
def _get_default_domain(self):
default_domain = self.domain
domain = {"id": self.request.session.get('domain_context',
default_domain.id),
"name": self.request.session.get('domain_context_name',
default_domain.name)}
return api.base.APIDictWrapper(domain)
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
    @test.create_stubs({api.keystone: ('get_default_domain',
                                       'get_default_role',
                                       'user_list',
                                       'group_list',
                                       'role_list'),
                        api.base: ('is_service_enabled',),
                        api.cinder: ('is_volume_service_enabled',),
                        api.neutron: ('is_extension_supported',),
                        quotas: ('get_default_quota_data',)})
    def test_add_project_get(self):
        """GET of the create-project workflow renders all four steps with
        quota defaults pre-filled.

        The mox expectations below are recorded in the order the view issues
        the calls; do not reorder them.
        """
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()

        # init
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
            .MultipleTimes().AndReturn(True)
        api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(default_domain)
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'security-group').AndReturn(True)
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
        self.mox.ReplayAll()

        url = reverse('horizon:identity:projects:create')
        res = self.client.get(url)

        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertContains(res, '<input type="hidden" name="subnet" '
                                 'id="id_subnet" />', html=True)

        workflow = res.context['workflow']
        self.assertEqual(res.context['workflow'].name,
                         workflows.CreateProject.name)

        # The quota step must be seeded with the default quota values.
        step = workflow.get_step("createprojectinfoaction")
        self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
        self.assertEqual(step.action.initial['injected_files'],
                         quota.get('injected_files').limit)
        self.assertQuerysetEqual(
            workflow.steps,
            ['<CreateProjectInfo: createprojectinfoaction>',
             '<UpdateProjectMembers: update_members>',
             '<UpdateProjectGroups: update_group_members>',
             '<CreateProjectQuota: create_quotas>'])
def test_add_project_get_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_get()
    @test.create_stubs({api.keystone: ('get_default_role',
                                       'user_list',
                                       'group_list',
                                       'role_list',
                                       'domain_get'),
                        api.neutron: ('is_extension_supported',
                                      'tenant_quota_get'),
                        quotas: ('get_default_quota_data',)})
    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_add_project_get_with_neutron(self):
        """With neutron quotas enabled, neutron limits seed the form."""
        quota = self.quotas.first()
        neutron_quotas = self.neutron_quotas.first()
        quotas.get_default_quota_data(IsA(http.HttpRequest)) \
            .AndReturn(quota)
        api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
            .MultipleTimes().AndReturn(True)
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'security-group').AndReturn(True)
        api.neutron.tenant_quota_get(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(neutron_quotas)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(self.roles.first())
        api.keystone.user_list(IsA(http.HttpRequest), domain=None) \
            .AndReturn(self.users.list())
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .AndReturn(self.roles.list())
        api.keystone.group_list(IsA(http.HttpRequest), domain=None) \
            .AndReturn(self.groups.list())
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .AndReturn(self.roles.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:identity:projects:create'))
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        # The subnet quota now renders as an editable number input whose
        # value comes from the neutron quota.
        self.assertContains(res, '''
        <input class="form-control"
               id="id_subnet" min="-1"
               name="subnet" type="number" value="10" />
        ''', html=True)
        workflow = res.context['workflow']
        self.assertEqual(res.context['workflow'].name,
                         workflows.CreateProject.name)
        step = workflow.get_step("createprojectinfoaction")
        self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
        self.assertEqual(step.action.initial['subnet'],
                         neutron_quotas.get('subnet').limit)
    @test.create_stubs({api.keystone: ('get_default_role',
                                       'add_tenant_user_role',
                                       'tenant_create',
                                       'user_list',
                                       'group_list',
                                       'role_list',
                                       'domain_get'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages',),
                        api.cinder: ('tenant_quota_update',),
                        api.nova: ('tenant_quota_update',)})
    def test_add_project_post(self, neutron=False):
        """POST the create workflow; verify tenant and quota API calls.

        Also reused by the neutron variant, which stubs additional calls
        and passes neutron=True to expect a second get_disabled_quotas.
        """
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        # init
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        if neutron:
            quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
                .AndReturn(self.disabled_quotas.first())
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        # handle
        project_details = self._get_project_info(project)
        quota_data = self._get_quota_info(quota)
        api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
            .AndReturn(project)
        workflow_data = {}
        # NOTE(review): workflow_data is empty here, so neither of the two
        # loops below ever records a role-assignment expectation.
        for role in roles:
            if USER_ROLE_PREFIX + role.id in workflow_data:
                ulist = workflow_data[USER_ROLE_PREFIX + role.id]
                for user_id in ulist:
                    api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                                      project=self.tenant.id,
                                                      user=user_id,
                                                      role=role.id)
        for role in roles:
            if GROUP_ROLE_PREFIX + role.id in workflow_data:
                ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
                for group_id in ulist:
                    api.keystone.add_group_role(IsA(http.HttpRequest),
                                                role=role.id,
                                                group=group_id,
                                                project=self.tenant.id)
        nova_updated_quota = dict([(key, quota_data[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota)
        cinder_updated_quota = dict([(key, quota_data[key]) for key in
                                     quotas.CINDER_QUOTA_FIELDS])
        api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                       project.id,
                                       **cinder_updated_quota)
        self.mox.ReplayAll()
        workflow_data.update(self._get_workflow_data(project, quota))
        url = reverse('horizon:identity:projects:create')
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_post_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_post()
    @test.create_stubs({api.neutron: ('is_extension_supported',
                                      'tenant_quota_update')})
    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_add_project_post_with_neutron(self):
        """Create-project POST also pushes neutron quota updates."""
        quota_data = self.neutron_quotas.first()
        neutron_updated_quota = dict([(key, quota_data.get(key).limit)
                                      for key in quotas.NEUTRON_QUOTA_FIELDS])
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'security-group').AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
            .MultipleTimes().AndReturn(True)
        api.neutron.tenant_quota_update(IsA(http.HttpRequest),
                                        self.tenant.id,
                                        **neutron_updated_quota)
        # Delegate to the base test, which records the remaining
        # expectations and calls ReplayAll.
        self.test_add_project_post(neutron=True)
    @test.create_stubs({api.keystone: ('user_list',
                                       'role_list',
                                       'group_list',
                                       'get_default_domain',
                                       'is_cloud_admin',
                                       'get_default_role'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas')})
    def test_add_project_quota_defaults_error(self):
        """A nova failure fetching default quotas shows an error message."""
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        # init
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(default_domain)
        api.keystone.is_cloud_admin(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        # The quota lookup raises; the view should degrade gracefully.
        quotas.get_default_quota_data(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        self.mox.ReplayAll()
        url = reverse('horizon:identity:projects:create')
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertContains(res, "Unable to retrieve default quota values")
def test_add_project_quota_defaults_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_defaults_error()
    @test.create_stubs({api.keystone: ('tenant_create',
                                       'user_list',
                                       'role_list',
                                       'group_list',
                                       'get_default_domain',
                                       'get_default_role'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages')})
    def test_add_project_tenant_create_error(self):
        """Keystone failure in tenant_create still redirects to the index."""
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        # init
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        # handle
        project_details = self._get_project_info(project)
        # tenant_create raises; the workflow reports failure and redirects.
        api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
            .AndRaise(self.exceptions.keystone)
        self.mox.ReplayAll()
        workflow_data = self._get_workflow_data(project, quota)
        url = reverse('horizon:identity:projects:create')
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_tenant_create_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_tenant_create_error()
    @test.create_stubs({api.keystone: ('tenant_create',
                                       'user_list',
                                       'role_list',
                                       'group_list',
                                       'get_default_domain',
                                       'get_default_role',
                                       'add_tenant_user_role'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages'),
                        api.nova: ('tenant_quota_update',)})
    def test_add_project_quota_update_error(self):
        """Nova quota-update failure after create still redirects home."""
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        # init
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        # handle
        project_details = self._get_project_info(project)
        quota_data = self._get_quota_info(quota)
        api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
            .AndReturn(project)
        workflow_data = {}
        # NOTE(review): workflow_data is empty here, so neither of the two
        # loops below ever records a role-assignment expectation.
        for role in roles:
            if USER_ROLE_PREFIX + role.id in workflow_data:
                ulist = workflow_data[USER_ROLE_PREFIX + role.id]
                for user_id in ulist:
                    api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                                      project=self.tenant.id,
                                                      user=user_id,
                                                      role=role.id)
        for role in roles:
            if GROUP_ROLE_PREFIX + role.id in workflow_data:
                ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
                for group_id in ulist:
                    api.keystone.add_group_role(IsA(http.HttpRequest),
                                                role=role.id,
                                                group=group_id,
                                                project=self.tenant.id)
        nova_updated_quota = dict([(key, quota_data[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        # The nova quota update raises; the workflow should still finish.
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        workflow_data.update(self._get_workflow_data(project, quota))
        url = reverse('horizon:identity:projects:create')
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_quota_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_update_error()
    @test.create_stubs({api.keystone: ('tenant_create',
                                       'user_list',
                                       'role_list',
                                       'group_list',
                                       'get_default_domain',
                                       'get_default_role',
                                       'add_tenant_user_role'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages'),
                        api.cinder: ('tenant_quota_update',),
                        api.nova: ('tenant_quota_update',)})
    def test_add_project_user_update_error(self):
        """Role-assignment failure after create still redirects home."""
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        # init
        api.keystone.get_default_domain(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(default_domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        # handle
        project_details = self._get_project_info(project)
        quota_data = self._get_quota_info(quota)
        api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
            .AndReturn(project)
        workflow_data = {}
        # NOTE(review): workflow_data is empty and the loop breaks after the
        # first role, so no add_tenant_user_role expectation is recorded.
        for role in roles:
            if USER_ROLE_PREFIX + role.id in workflow_data:
                ulist = workflow_data[USER_ROLE_PREFIX + role.id]
                for user_id in ulist:
                    api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                                      project=self.tenant.id,
                                                      user=user_id,
                                                      role=role.id) \
                        .AndRaise(self.exceptions.keystone)
                    break
            break
        nova_updated_quota = dict([(key, quota_data[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota)
        cinder_updated_quota = dict([(key, quota_data[key]) for key in
                                     quotas.CINDER_QUOTA_FIELDS])
        api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                       project.id,
                                       **cinder_updated_quota)
        self.mox.ReplayAll()
        workflow_data.update(self._get_workflow_data(project, quota))
        url = reverse('horizon:identity:projects:create')
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_user_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_user_update_error()
    @test.create_stubs({api.keystone: ('user_list',
                                       'role_list',
                                       'group_list',
                                       'get_default_domain',
                                       'get_default_role'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages')})
    def test_add_project_missing_field_error(self):
        """A blank project name triggers a required-field form error."""
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        # init
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(default_domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        self.mox.ReplayAll()
        workflow_data = self._get_workflow_data(project, quota)
        # Blank out the required "name" field to provoke the form error.
        workflow_data["name"] = ""
        url = reverse('horizon:identity:projects:create')
        res = self.client.post(url, workflow_data)
        self.assertContains(res, "field is required")
def test_add_project_missing_field_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_missing_field_error()
class UpdateProjectWorkflowTests(test.BaseAdminViewTests):
    """Tests for the identity "update project" workflow view."""
    def _get_quota_info(self, quota):
        """Return nova/cinder/neutron quota limits keyed by field name."""
        cinder_quota = self.cinder_quotas.first()
        neutron_quota = self.neutron_quotas.first()
        quota_data = {}
        for field in quotas.NOVA_QUOTA_FIELDS:
            quota_data[field] = int(quota.get(field).limit)
        for field in quotas.CINDER_QUOTA_FIELDS:
            quota_data[field] = int(cinder_quota.get(field).limit)
        for field in quotas.NEUTRON_QUOTA_FIELDS:
            quota_data[field] = int(neutron_quota.get(field).limit)
        return quota_data
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
def _get_proj_users(self, project_id):
return [user for user in self.users.list()
if user.project_id == project_id]
def _get_proj_groups(self, project_id):
return [group for group in self.groups.list()
if group.project_id == project_id]
def _get_proj_role_assignment(self, project_id):
project_scope = {'project': {'id': project_id}}
return self.role_assignments.filter(scope=project_scope)
    def _check_role_list(self, keystone_api_version, role_assignments, groups,
                         proj_users, roles, workflow_data):
        """Record role add/remove expectations for an update-project POST.

        For keystone v3 this also fills ``workflow_data`` in place with the
        user and group role selections the form will submit; for v2 it
        stubs the per-user role lookups instead.
        """
        if keystone_api_version >= 3:
            # admin role with attempt to remove current admin, results in
            # warning message
            workflow_data[USER_ROLE_PREFIX + "1"] = ['3']
            # member role
            workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '3']
            # admin role
            workflow_data[GROUP_ROLE_PREFIX + "1"] = ['2', '3']
            # member role
            workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .MultipleTimes().AndReturn(role_assignments)
            # Give user 1 role 2
            api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                              project=self.tenant.id,
                                              user='1',
                                              role='2',).InAnyOrder()
            # remove role 2 from user 2
            api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
                                                 project=self.tenant.id,
                                                 user='2',
                                                 role='2').InAnyOrder()
            # Give user 3 role 1
            api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                              project=self.tenant.id,
                                              user='3',
                                              role='1',).InAnyOrder()
            api.keystone.group_list(IsA(http.HttpRequest),
                                    domain=self.domain.id,
                                    project=self.tenant.id) \
                .AndReturn(groups)
            api.keystone.roles_for_group(IsA(http.HttpRequest),
                                         group='1',
                                         project=self.tenant.id) \
                .AndReturn(roles)
            api.keystone.remove_group_role(IsA(http.HttpRequest),
                                           project=self.tenant.id,
                                           group='1',
                                           role='1')
            api.keystone.roles_for_group(IsA(http.HttpRequest),
                                         group='2',
                                         project=self.tenant.id) \
                .AndReturn(roles)
            api.keystone.roles_for_group(IsA(http.HttpRequest),
                                         group='3',
                                         project=self.tenant.id) \
                .AndReturn(roles)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)
            # admin user - try to remove all roles on current project, warning
            api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
                                        self.tenant.id).AndReturn(roles)
            # member user 1 - has role 1, will remove it
            api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
                                        self.tenant.id).AndReturn((roles[1],))
            # member user 3 - has role 2
            api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
                                        self.tenant.id).AndReturn((roles[0],))
            # add role 2
            api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                              project=self.tenant.id,
                                              user='3',
                                              role='2')\
                .AndRaise(self.exceptions.keystone)
    @test.create_stubs({api.keystone: ('get_default_role',
                                       'roles_for_user',
                                       'tenant_get',
                                       'domain_get',
                                       'user_list',
                                       'roles_for_group',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas')})
    def test_update_project_get(self):
        """GET of the update workflow pre-fills fields from the project."""
        keystone_api_version = api.keystone.VERSIONS.active
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        proj_users = self._get_proj_users(project.id)
        role_assignments = self._get_proj_role_assignment(project.id)
        api.keystone.tenant_get(IsA(http.HttpRequest),
                                self.tenant.id, admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        # Member lookups differ by keystone API version: v3 uses role
        # assignments, v2 lists users per project.
        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .MultipleTimes().AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
        self.mox.ReplayAll()
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        workflow = res.context['workflow']
        self.assertEqual(res.context['workflow'].name,
                         workflows.UpdateProject.name)
        step = workflow.get_step("update_info")
        # Initial form values come from the project's quota and details.
        self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
        self.assertEqual(step.action.initial['injected_files'],
                         quota.get('injected_files').limit)
        self.assertEqual(step.action.initial['name'], project.name)
        self.assertEqual(step.action.initial['description'],
                         project.description)
        self.assertQuerysetEqual(
            workflow.steps,
            ['<UpdateProjectInfo: update_info>',
             '<UpdateProjectMembers: update_members>',
             '<UpdateProjectGroups: update_group_members>',
             '<UpdateProjectQuota: update_quotas>'])
    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'get_effective_domain_id',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user_role',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        api.nova: ('tenant_quota_update',),
                        api.cinder: ('tenant_quota_update',),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages',)})
    def test_update_project_save(self, neutron=False):
        """POST the update workflow; verify keystone and quota update calls.

        Also reused by the neutron variant, which stubs extra calls and
        passes neutron=True to expect a second get_disabled_quotas call.
        """
        keystone_api_version = api.keystone.VERSIONS.active
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        proj_users = self._get_proj_users(project.id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        role_assignments = self._get_proj_role_assignment(project.id)
        quota_usages = self.quota_usages.first()
        # get/init
        api.keystone.tenant_get(IsA(http.HttpRequest),
                                self.tenant.id, admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        if neutron:
            quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
                .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        workflow_data = {}
        if keystone_api_version < 3:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
        workflow_data[USER_ROLE_PREFIX + "1"] = ['3'] # admin role
        workflow_data[USER_ROLE_PREFIX + "2"] = ['2'] # member role
        # Group assignment form data
        workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3'] # admin role
        workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2'] # member role
        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota.metadata_items = 444
        quota.volumes = 444
        updated_quota = self._get_quota_info(quota)
        # called once for tenant_update
        api.keystone.get_effective_domain_id(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(domain_id)
        # handle
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   name=project._info["name"],
                                   description=project._info['description'],
                                   enabled=project.enabled,
                                   domain=domain_id).AndReturn(project)
        api.keystone.user_list(IsA(http.HttpRequest),
                               domain=domain_id).AndReturn(users)
        # Role add/remove expectations (and v3 form data) come from the
        # shared helper; it overwrites the role selections set above.
        self._check_role_list(keystone_api_version, role_assignments, groups,
                              proj_users, roles, workflow_data)
        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)
        nova_updated_quota = dict([(key, updated_quota[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota)
        cinder_updated_quota = dict([(key, updated_quota[key]) for key in
                                     quotas.CINDER_QUOTA_FIELDS])
        api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                       project.id,
                                       **cinder_updated_quota)
        self.mox.ReplayAll()
        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        # One warning: the form tries to remove the current admin's own
        # admin role (see _check_role_list).
        self.assertMessageCount(error=0, warning=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('is_extension_supported',
                                      'tenant_quota_get',
                                      'tenant_quota_update')})
    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_update_project_save_with_neutron(self):
        """Update-project POST also pushes neutron quota updates."""
        quota_data = self.neutron_quotas.first()
        neutron_updated_quota = dict([(key, quota_data.get(key).limit)
                                      for key in quotas.NEUTRON_QUOTA_FIELDS])
        api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
            .MultipleTimes().AndReturn(True)
        api.neutron.tenant_quota_get(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota_data)
        api.neutron.tenant_quota_update(IsA(http.HttpRequest),
                                        self.tenant.id,
                                        **neutron_updated_quota)
        # Delegate to the base test, which records the remaining
        # expectations and calls ReplayAll.
        self.test_update_project_save(neutron=True)
    @test.create_stubs({api.keystone: ('tenant_get',)})
    def test_update_project_get_error(self):
        """A failure fetching the tenant redirects back to the index."""
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'get_effective_domain_id',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages',),
                        api.nova: ('tenant_quota_update',)})
    def test_update_project_tenant_update_error(self):
        """A keystone failure in tenant_update redirects to the index."""
        keystone_api_version = api.keystone.VERSIONS.active
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        proj_users = self._get_proj_users(project.id)
        role_assignments = self.role_assignments.list()
        quota_usages = self.quota_usages.first()
        # get/init
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        workflow_data = {}
        # Member lookups differ by keystone API version.
        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .MultipleTimes().AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
        # Assign every project user (and group) the first role in the form.
        role_ids = [role.id for role in roles]
        for user in proj_users:
            if role_ids:
                workflow_data.setdefault(USER_ROLE_PREFIX + role_ids[0], []) \
                    .append(user.id)
        role_ids = [role.id for role in roles]
        for group in groups:
            if role_ids:
                workflow_data.setdefault(GROUP_ROLE_PREFIX + role_ids[0], []) \
                    .append(group.id)
        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota.metadata_items = 444
        quota.volumes = 444
        updated_quota = self._get_quota_info(quota)
        # handle
        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)
        api.keystone.get_effective_domain_id(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(domain_id)
        # tenant_update raises, so no role/quota update calls follow.
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   name=project._info["name"],
                                   domain=domain_id,
                                   description=project._info['description'],
                                   enabled=project.enabled) \
            .AndRaise(self.exceptions.keystone)
        self.mox.ReplayAll()
        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'get_effective_domain_id',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user_role',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages',),
                        api.nova: ('tenant_quota_update',)})
    def test_update_project_quota_update_error(self):
        """Project update succeeds but the nova quota update raises.

        Records the full mox call sequence: keystone tenant_update
        returns normally, then api.nova.tenant_quota_update raises,
        and the workflow is expected to surface 2 errors + 1 warning
        while still redirecting to the index.
        """
        keystone_api_version = api.keystone.VERSIONS.active
        # Fixture data for the project being edited.
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        proj_users = self._get_proj_users(project.id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        role_assignments = self._get_proj_role_assignment(project.id)
        quota_usages = self.quota_usages.first()
        # get/init -- calls made while the workflow form is built.
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest),
                               domain=domain_id).AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        workflow_data = {}
        # Pre-v3 keystone resolves membership per-user rather than via
        # role assignments.
        if keystone_api_version < 3:
            api.keystone.user_list(
                IsA(http.HttpRequest),
                project=self.tenant.id).AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
        workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        # Group role assignment data
        workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota[0].limit = 444
        quota[1].limit = -1
        updated_quota = self._get_quota_info(quota)
        # handle -- calls made when the workflow is submitted.
        api.keystone.get_effective_domain_id(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(domain_id)
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   name=project._info["name"],
                                   description=project._info['description'],
                                   enabled=project.enabled,
                                   domain=domain_id).AndReturn(project)
        api.keystone.user_list(IsA(http.HttpRequest),
                               domain=domain_id).AndReturn(users)
        self._check_role_list(keystone_api_version, role_assignments, groups,
                              proj_users, roles, workflow_data)
        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)
        # Only the nova-managed quota fields are sent to nova; this call
        # is the one that fails in this scenario.
        nova_updated_quota = dict([(key, updated_quota[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2, warning=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user_role',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list',
                                       'get_effective_domain_id'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages'),
                        api.nova: ('tenant_quota_update',)})
    def test_update_project_member_update_error(self):
        """Membership update fails during project update.

        The tenant update and quota update succeed; the failure is
        injected via the role-list check (``_check_role_list``), and the
        view is expected to report 2 errors + 1 warning and redirect.
        """
        keystone_api_version = api.keystone.VERSIONS.active
        # Fixture data for the project being edited.
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        proj_users = self._get_proj_users(project.id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        role_assignments = self._get_proj_role_assignment(project.id)
        quota_usages = self.quota_usages.first()
        # get/init -- calls made while the workflow form is built.
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .MultipleTimes().AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        workflow_data = {}
        # Pre-v3 keystone resolves membership per-user rather than via
        # role assignments.
        if keystone_api_version < 3:
            api.keystone.user_list(
                IsA(http.HttpRequest),
                project=self.tenant.id).AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
        workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota.metadata_items = 444
        quota.volumes = 444
        updated_quota = self._get_quota_info(quota)
        # handle -- calls made when the workflow is submitted.
        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)
        api.keystone.get_effective_domain_id(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(domain_id)
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   name=project._info["name"],
                                   description=project._info['description'],
                                   enabled=project.enabled,
                                   domain=domain_id).AndReturn(project)
        api.keystone.user_list(IsA(http.HttpRequest),
                               domain=domain_id).AndReturn(users)
        self._check_role_list(keystone_api_version, role_assignments, groups,
                              proj_users, roles, workflow_data)
        api.nova.tenant_quota_update(IsA(http.HttpRequest), project.id,
                                     **updated_quota)
        self.mox.ReplayAll()
        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2, warning=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.keystone: ('get_default_role',
                                       'tenant_get',
                                       'domain_get'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas')})
    def test_update_project_when_default_role_does_not_exist(self):
        """The update workflow raises NotFound when no default role exists.

        ``get_default_role`` returning None must make the workflow's
        membership step fail to instantiate, which surfaces to the view
        as ``exceptions.NotFound``.
        """
        project = self.tenants.first()
        domain_id = project.domain_id
        quota = self.quotas.first()
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(None)  # Default role doesn't exist
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        self.mox.ReplayAll()
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        try:
            # Avoid the log message in the test output when the workflow's
            # step action cannot be instantiated
            logging.disable(logging.ERROR)
            with self.assertRaises(exceptions.NotFound):
                self.client.get(url)
        finally:
            # Always restore logging, even if the assertion fails.
            logging.disable(logging.NOTSET)
class UsageViewTests(test.BaseAdminViewTests):
    """Tests for the per-project usage view, including CSV export."""

    def _stub_nova_api_calls(self, nova_stu_enabled=True):
        # Stub the nova/cinder calls the usage view makes and record the
        # first SimpleTenantUsage extension check.
        self.mox.StubOutWithMock(api.nova, 'usage_get')
        self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
        self.mox.StubOutWithMock(api.nova, 'extension_supported')
        self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)

    def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
        # Stub the neutron-related calls; security-group listing is only
        # expected when the extension is reported as enabled.
        self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
        self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
        self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
        if neutron_sg_enabled:
            self.mox.StubOutWithMock(api.network, 'security_group_list')
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'security-group').AndReturn(neutron_sg_enabled)
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        if neutron_sg_enabled:
            api.network.security_group_list(IsA(http.HttpRequest)) \
                .AndReturn(self.q_secgroups.list())

    def test_usage_csv(self):
        self._test_usage_csv(nova_stu_enabled=True)

    def test_usage_csv_disabled(self):
        self._test_usage_csv(nova_stu_enabled=False)

    def _test_usage_csv(self, nova_stu_enabled=True):
        """Fetch project usage as CSV; check template, context and header."""
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self._stub_nova_api_calls(nova_stu_enabled)
        # The view checks the extension a second time.
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)
        # Usage period: start of the current month through end of today.
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
        if nova_stu_enabled:
            api.nova.usage_get(IsA(http.HttpRequest),
                               self.tenant.id,
                               start, end).AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.cinder_limits['absolute'])
        self._stub_neutron_api_calls()
        self.mox.ReplayAll()
        project_id = self.tenants.first().id
        csv_url = reverse('horizon:identity:projects:usage',
                          args=[project_id]) + "?format=csv"
        res = self.client.get(csv_url)
        self.assertTemplateUsed(res, 'project/overview/usage.csv')
        self.assertIsInstance(res.context['usage'], usage.ProjectUsage)
        hdr = ('Instance Name,VCPUs,RAM (MB),Disk (GB),Usage (Hours),'
               'Time since created (Seconds),State')
        # CSV output uses CRLF line endings.
        self.assertContains(res, '%s\r\n' % hdr)
class DetailProjectViewTests(test.BaseAdminViewTests):
    """Tests for the project detail view."""

    @test.create_stubs({api.keystone: ('tenant_get',)})
    def test_detail_view(self):
        project = self.tenants.first()
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
            .AndReturn(project)
        self.mox.ReplayAll()
        # NOTE(review): django's Client.get() takes no ``args`` parameter;
        # PROJECT_DETAIL_URL is presumably already a fully reversed URL and
        # the extra kwarg is silently absorbed -- confirm and consider
        # removing it.
        res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])
        self.assertTemplateUsed(res, 'identity/projects/detail.html')
        self.assertEqual(res.context['project'].name, project.name)
        self.assertEqual(res.context['project'].id, project.id)

    @test.create_stubs({api.keystone: ('tenant_get',)})
    def test_detail_view_with_exception(self):
        """A keystone error on tenant_get redirects back to the index."""
        project = self.tenants.first()
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
            .AndRaise(self.exceptions.keystone)
        self.mox.ReplayAll()
        res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])
        self.assertRedirectsNoFollow(res, INDEX_URL)
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
                     "The WITH_SELENIUM env variable is not set.")
class SeleniumTests(test.SeleniumAdminTestCase):
    """Live-browser tests for inline editing of the tenants table."""

    @test.create_stubs(
        {api.keystone: ('tenant_list', 'tenant_get', 'tenant_update',
                        'domain_lookup')})
    def test_inline_editing_update(self):
        """Edit a tenant name inline, submit, and verify the refreshed cell."""
        # Tenant List
        api.keystone.tenant_list(IgnoreArg(),
                                 domain=None,
                                 marker=None,
                                 paginate=True) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.domain_lookup(IgnoreArg()).AndReturn({None: None})
        # Edit mod
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(self.tenants.list()[0])
        # Update - requires get and update
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(self.tenants.list()[0])
        api.keystone.tenant_update(
            IgnoreArg(),
            u'1',
            description='a test tenant.',
            enabled=True,
            name=u'Changed test_tenant')
        # Refreshing cell with changed name
        changed_tenant = copy.copy(self.tenants.list()[0])
        changed_tenant.name = u'Changed test_tenant'
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(changed_tenant)
        self.mox.ReplayAll()
        self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
        # Check the presence of the important elements
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        cell_wrapper = td_element.find_element_by_class_name(
            'table_cell_wrapper')
        edit_button_wrapper = td_element.find_element_by_class_name(
            'table_cell_action')
        edit_button = edit_button_wrapper.find_element_by_tag_name('button')
        # Hovering over td and clicking on edit button
        action_chains = ActionChains(self.selenium)
        action_chains.move_to_element(cell_wrapper).click(edit_button)
        action_chains.perform()
        # Waiting for the AJAX response for switching to editing mod
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
        # Changing project name in cell form
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        name_input = td_element.find_element_by_tag_name('input')
        name_input.send_keys(keys.Keys.HOME)
        name_input.send_keys("Changed ")
        # Saving new project name by AJAX
        td_element.find_element_by_class_name('inline-edit-submit').click()
        # Waiting for the AJAX response of cell refresh
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']"
            "/div[@class='table_cell_wrapper']"
            "/div[@class='table_cell_data_wrapper']"))
        # Checking new project name after cell refresh
        data_wrapper = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']"
            "/div[@class='table_cell_wrapper']"
            "/div[@class='table_cell_data_wrapper']")
        self.assertTrue(data_wrapper.text == u'Changed test_tenant',
                        "Error: saved tenant name is expected to be "
                        "'Changed test_tenant'")

    @test.create_stubs(
        {api.keystone: ('tenant_list', 'tenant_get', 'domain_lookup')})
    def test_inline_editing_cancel(self):
        """Enter inline edit mode, cancel, and verify the name is unchanged."""
        # Tenant List
        api.keystone.tenant_list(IgnoreArg(),
                                 domain=None,
                                 marker=None,
                                 paginate=True) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.domain_lookup(IgnoreArg()).AndReturn({None: None})
        # Edit mod
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(self.tenants.list()[0])
        # Cancel edit mod is without the request
        self.mox.ReplayAll()
        self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
        # Check the presence of the important elements
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        cell_wrapper = td_element.find_element_by_class_name(
            'table_cell_wrapper')
        edit_button_wrapper = td_element.find_element_by_class_name(
            'table_cell_action')
        edit_button = edit_button_wrapper.find_element_by_tag_name('button')
        # Hovering over td and clicking on edit
        action_chains = ActionChains(self.selenium)
        action_chains.move_to_element(cell_wrapper).click(edit_button)
        action_chains.perform()
        # Waiting for the AJAX response for switching to editing mod
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
        # Click on cancel button
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        td_element.find_element_by_class_name('inline-edit-cancel').click()
        # Cancel is via javascript, so it should be immediate
        # Checking that tenant name is not changed
        data_wrapper = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']"
            "/div[@class='table_cell_wrapper']"
            "/div[@class='table_cell_data_wrapper']")
        self.assertTrue(data_wrapper.text == u'test_tenant',
                        "Error: saved tenant name is expected to be "
                        "'test_tenant'")

    @test.create_stubs({api.keystone: ('get_default_domain',
                                       'get_default_role',
                                       'user_list',
                                       'group_list',
                                       'role_list'),
                        api.base: ('is_service_enabled',),
                        api.cinder: ('is_volume_service_enabled',),
                        quotas: ('get_default_quota_data',)})
    def test_membership_list_loads_correctly(self):
        """The create-project membership widget lists all domain users."""
        member_css_class = ".available_members"
        users = self.users.list()
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
            .MultipleTimes().AndReturn(False)
        api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(False)
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(self.domain)
        quotas.get_default_quota_data(IsA(http.HttpRequest)) \
            .AndReturn(self.quotas.first())
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(self.roles.first())
        api.keystone.user_list(IsA(http.HttpRequest), domain=self.domain.id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .AndReturn(self.roles.list())
        api.keystone.group_list(IsA(http.HttpRequest), domain=self.domain.id) \
            .AndReturn(self.groups.list())
        # role_list is called once for the users step and once for groups.
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .AndReturn(self.roles.list())
        self.mox.ReplayAll()
        self.selenium.get("%s%s" %
                          (self.live_server_url,
                           reverse('horizon:identity:projects:create')))
        members = self.selenium.find_element_by_css_selector(member_css_class)
        for user in users:
            self.assertIn(user.name, members.text)

    @override_settings(OPENSTACK_KEYSTONE_ADMIN_ROLES=['foO', 'BAR', 'admin'])
    def test_get_admin_roles(self):
        """Configured admin role names are returned lower-cased."""
        mix_in = workflows.IdentityMixIn()
        admin_roles = mix_in.get_admin_roles()
        self.assertEqual(['foo', 'bar', 'admin'], admin_roles)
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import Iterable
import colorsys
import operator
import re
import string
from warnings import warn
import six
from scss.cssdefs import COLOR_LOOKUP, COLOR_NAMES, ZEROABLE_UNITS, convert_units_to_base_units, cancel_base_units, count_base_units
PRECISION = 5
###############################################################################
# pyScss data types:
# TODO make Value work as a string in every way? i.e. have a .quotes...
class Value(object):
    """Base class for all Sass value types.

    Subclasses are expected to provide a ``value`` attribute (used by the
    default ``__repr__`` and ``__eq__``) and to implement ``render``.
    """
    # Overridden by Null; lets callers test for null without isinstance.
    is_null = False
    # Human-readable type name, as reported to Sass code.
    sass_type_name = 'unknown'

    def __repr__(self):
        return "<{0}: {1!r}>".format(type(self).__name__, self.value)

    # Sass values are all true, except for booleans and nulls
    def __bool__(self):
        return True

    def __nonzero__(self):
        # Py 2's name for __bool__
        return self.__bool__()

    # All Sass scalars also act like one-element spaced lists
    use_comma = False

    def __iter__(self):
        return iter((self,))

    def __len__(self):
        return 1

    def __getitem__(self, key):
        # Only index 0 (or -1, the same single element) is valid for a scalar.
        if key not in (-1, 0):
            raise IndexError(key)
        return self

    def __contains__(self, item):
        return self == item

    ### NOTE: From here on down, the operators are exposed to Sass code and
    ### thus should ONLY return Sass types

    # Reasonable default for equality
    def __eq__(self, other):
        return Boolean(
            type(self) == type(other) and self.value == other.value)

    def __ne__(self, other):
        return Boolean(not self.__eq__(other))

    # Only numbers support ordering
    def __lt__(self, other):
        raise TypeError("Can't compare %r with %r" % (self, other))

    def __le__(self, other):
        raise TypeError("Can't compare %r with %r" % (self, other))

    def __gt__(self, other):
        raise TypeError("Can't compare %r with %r" % (self, other))

    def __ge__(self, other):
        raise TypeError("Can't compare %r with %r" % (self, other))

    # Math ops
    def __add__(self, other):
        # Default behavior is to treat both sides like strings
        if isinstance(other, String):
            return String(self.render() + other.value, quotes=other.quotes)
        return String(self.render() + other.render())

    def __sub__(self, other):
        # Default behavior is to treat the whole expression like one string
        return String.unquoted(self.render() + "-" + other.render())

    def __div__(self, other):
        return String.unquoted(self.render() + "/" + other.render())

    # Sass types have no notion of floor vs true division
    def __truediv__(self, other):
        return self.__div__(other)

    def __floordiv__(self, other):
        return self.__div__(other)

    def __mul__(self, other):
        return NotImplemented

    def __pos__(self):
        return String("+" + self.render())

    def __neg__(self):
        return String("-" + self.render())

    def to_dict(self):
        """Return the Python dict equivalent of this map.

        If this type can't be expressed as a map, raise.
        """
        return dict(self.to_pairs())

    def to_pairs(self):
        """Return the Python list-of-tuples equivalent of this map.  Note
        that this is different from ``self.to_dict().items()``, because Sass
        maps preserve order.

        If this type can't be expressed as a map, raise.
        """
        raise ValueError("Not a map: {0!r}".format(self))

    def render(self, compress=False):
        """Return this value's CSS representation as a string (text, i.e.
        unicode!).

        If `compress` is true, try hard to shorten the string at the cost of
        readability.
        """
        raise NotImplementedError

    def render_interpolated(self, compress=False):
        """Return this value's string representation as appropriate for
        returning from an interpolation.
        """
        return self.render(compress)
class Null(Value):
    """The Sass ``null`` value: falsy, and equal only to other nulls."""

    is_null = True
    sass_type_name = 'null'

    def __init__(self, value=None):
        # The argument is accepted only for signature compatibility with
        # the other value types; a null carries no payload.
        pass

    def __str__(self):
        return self.sass_type_name

    def __repr__(self):
        return '<' + type(self).__name__ + '>'

    def __hash__(self):
        # Every null hashes alike, mirroring __eq__.
        return hash(None)

    def __bool__(self):
        return False

    def __eq__(self, rhs):
        return Boolean(isinstance(rhs, Null))

    def __ne__(self, rhs):
        return Boolean(not self.__eq__(rhs))

    def render(self, compress=False):
        return self.sass_type_name

    def render_interpolated(self, compress=False):
        # Interpolating a null gives you nothing.
        return ''
class Undefined(Null):
    """A null-like value that swallows arithmetic.

    Every unary and binary math operation involving an undefined value
    yields the same undefined value, so errors propagate inertly instead
    of raising.
    """

    sass_type_name = 'undefined'

    def __init__(self, value=None):
        # Argument accepted for signature compatibility and discarded.
        pass

    def _inert(self, other=None):
        # Shared implementation for every math operator: the result of
        # any arithmetic with an undefined value is the value itself.
        return self

    # Binary operators, both orientations.
    __add__ = __radd__ = _inert
    __sub__ = __rsub__ = _inert
    __div__ = __rdiv__ = _inert
    __truediv__ = __rtruediv__ = _inert
    __floordiv__ = __rfloordiv__ = _inert
    __mul__ = __rmul__ = _inert
    # Unary operators.
    __pos__ = _inert
    __neg__ = _inert
class Boolean(Value):
    """A Sass boolean, rendered as the literal ``true`` or ``false``."""

    sass_type_name = 'bool'

    def __init__(self, value):
        # Coerce any Python truthiness down to a strict bool.
        self.value = bool(value)

    def __str__(self):
        if self.value:
            return 'true'
        return 'false'

    def __hash__(self):
        return hash(self.value)

    def __bool__(self):
        return self.value

    def render(self, compress=False):
        # CSS output is identical to the plain string form; `compress`
        # has no effect on a boolean.
        return 'true' if self.value else 'false'
class Number(Value):
sass_type_name = 'number'
def __init__(self, amount, unit=None, unit_numer=(), unit_denom=()):
if isinstance(amount, Number):
assert not unit and not unit_numer and not unit_denom
self.value = amount.value
self.unit_numer = amount.unit_numer
self.unit_denom = amount.unit_denom
return
if not isinstance(amount, (int, float)):
raise TypeError("Expected number, got %r" % (amount,))
if unit is not None:
unit_numer = unit_numer + (unit.lower(),)
# Cancel out any convertable units on the top and bottom
numerator_base_units = count_base_units(unit_numer)
denominator_base_units = count_base_units(unit_denom)
# Count which base units appear both on top and bottom
cancelable_base_units = {}
for unit, count in numerator_base_units.items():
cancelable_base_units[unit] = min(
count, denominator_base_units.get(unit, 0))
# Actually remove the units
numer_factor, unit_numer = cancel_base_units(unit_numer, cancelable_base_units)
denom_factor, unit_denom = cancel_base_units(unit_denom, cancelable_base_units)
# And we're done
self.unit_numer = tuple(unit_numer)
self.unit_denom = tuple(unit_denom)
self.value = amount * (numer_factor / denom_factor)
def __repr__(self):
value = self.value
int_value = int(value)
if value == int_value:
value = int_value
full_unit = ' * '.join(self.unit_numer)
if self.unit_denom:
full_unit += ' / '
full_unit += ' * '.join(self.unit_denom)
if full_unit:
full_unit = ' ' + full_unit
return "<{0} {1}{2}>".format(type(self).__name__, value, full_unit)
def __hash__(self):
return hash((self.value, self.unit_numer, self.unit_denom))
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def __pos__(self):
return self
def __neg__(self):
return self * Number(-1)
def __str__(self):
return self.render()
def __eq__(self, other):
if not isinstance(other, Number):
return Boolean(False)
return self._compare(other, operator.__eq__, soft_fail=True)
def __lt__(self, other):
return self._compare(other, operator.__lt__)
def __le__(self, other):
return self._compare(other, operator.__le__)
def __gt__(self, other):
return self._compare(other, operator.__gt__)
def __ge__(self, other):
return self._compare(other, operator.__ge__)
def _compare(self, other, op, soft_fail=False):
if not isinstance(other, Number):
raise TypeError("Can't compare %r and %r" % (self, other))
# A unitless operand is treated as though it had the other operand's
# units, and zero values can cast to anything, so in both cases the
# units can be ignored
if (self.is_unitless or other.is_unitless or
self.value == 0 or other.value == 0):
left = self
right = other
else:
left = self.to_base_units()
right = other.to_base_units()
if left.unit_numer != right.unit_numer or left.unit_denom != right.unit_denom:
if soft_fail:
# Used for equality only, where == should never fail
return Boolean(False)
else:
raise ValueError("Can't reconcile units: %r and %r" % (self, other))
return Boolean(op(round(left.value, PRECISION), round(right.value, PRECISION)))
def __pow__(self, exp):
if not isinstance(exp, Number):
raise TypeError("Can't raise %r to power %r" % (self, exp))
if not exp.is_unitless:
raise TypeError("Exponent %r cannot have units" % (exp,))
if self.is_unitless:
return Number(self.value ** exp.value)
# Units can only be exponentiated to integral powers -- what's the
# square root of 'px'? (Well, it's sqrt(px), but supporting that is
# a bit out of scope.)
if exp.value != int(exp.value):
raise ValueError("Can't raise units of %r to non-integral power %r" % (self, exp))
return Number(
self.value ** int(exp.value),
unit_numer=self.unit_numer * int(exp.value),
unit_denom=self.unit_denom * int(exp.value),
)
def __mul__(self, other):
if not isinstance(other, Number):
return NotImplemented
amount = self.value * other.value
numer = self.unit_numer + other.unit_numer
denom = self.unit_denom + other.unit_denom
return Number(amount, unit_numer=numer, unit_denom=denom)
def __div__(self, other):
if not isinstance(other, Number):
return NotImplemented
amount = self.value / other.value
numer = self.unit_numer + other.unit_denom
denom = self.unit_denom + other.unit_numer
return Number(amount, unit_numer=numer, unit_denom=denom)
def __mod__(self, other):
if not isinstance(other, Number):
return NotImplemented
amount = self.value % other.value
if self.is_unitless:
return Number(amount)
if not other.is_unitless:
left = self.to_base_units()
right = other.to_base_units()
if left.unit_numer != right.unit_numer or left.unit_denom != right.unit_denom:
raise ValueError("Can't reconcile units: %r and %r" % (self, other))
return Number(amount, unit_numer=self.unit_numer, unit_denom=self.unit_denom)
def __add__(self, other):
# Numbers auto-cast to strings when added to other strings
if isinstance(other, String):
return String(self.render(), quotes=None) + other
return self._add_sub(other, operator.add)
def __sub__(self, other):
return self._add_sub(other, operator.sub)
def _add_sub(self, other, op):
"""Implements both addition and subtraction."""
if not isinstance(other, Number):
return NotImplemented
# If either side is unitless, inherit the other side's units. Skip all
# the rest of the conversion math, too.
if self.is_unitless or other.is_unitless:
return Number(
op(self.value, other.value),
unit_numer=self.unit_numer or other.unit_numer,
unit_denom=self.unit_denom or other.unit_denom,
)
# Likewise, if either side is zero, it can auto-cast to any units
if self.value == 0:
return Number(
op(self.value, other.value),
unit_numer=other.unit_numer,
unit_denom=other.unit_denom,
)
elif other.value == 0:
return Number(
op(self.value, other.value),
unit_numer=self.unit_numer,
unit_denom=self.unit_denom,
)
# Reduce both operands to the same units
left = self.to_base_units()
right = other.to_base_units()
if left.unit_numer != right.unit_numer or left.unit_denom != right.unit_denom:
raise ValueError("Can't reconcile units: %r and %r" % (self, other))
new_amount = op(left.value, right.value)
# Convert back to the left side's units
if left.value != 0:
new_amount = new_amount * self.value / left.value
return Number(new_amount, unit_numer=self.unit_numer, unit_denom=self.unit_denom)
### Helper methods, mostly used internally
def to_base_units(self):
"""Convert to a fixed set of "base" units. The particular units are
arbitrary; what's important is that they're consistent.
Used for addition and comparisons.
"""
# Convert to "standard" units, as defined by the conversions dict above
amount = self.value
numer_factor, numer_units = convert_units_to_base_units(self.unit_numer)
denom_factor, denom_units = convert_units_to_base_units(self.unit_denom)
return Number(
amount * numer_factor / denom_factor,
unit_numer=numer_units,
unit_denom=denom_units,
)
### Utilities for public consumption
@classmethod
def wrap_python_function(cls, fn):
"""Wraps an unary Python math function, translating the argument from
Sass to Python on the way in, and vice versa for the return value.
Used to wrap simple Python functions like `ceil`, `floor`, etc.
"""
def wrapped(sass_arg):
# TODO enforce no units for trig?
python_arg = sass_arg.value
python_ret = fn(python_arg)
sass_ret = cls(
python_ret,
unit_numer=sass_arg.unit_numer,
unit_denom=sass_arg.unit_denom)
return sass_ret
return wrapped
def to_python_index(self, length, check_bounds=True, circular=False):
"""Return a plain Python integer appropriate for indexing a sequence of
the given length. Raise if this is impossible for any reason
whatsoever.
"""
if not self.is_unitless:
raise ValueError("Index cannot have units: {0!r}".format(self))
ret = int(self.value)
if ret != self.value:
raise ValueError("Index must be an integer: {0!r}".format(ret))
if ret == 0:
raise ValueError("Index cannot be zero")
if check_bounds and not circular and abs(ret) > length:
raise ValueError("Index {0!r} out of bounds for length {1}".format(ret, length))
if ret > 0:
ret -= 1
if circular:
ret = ret % length
return ret
@property
def has_simple_unit(self):
"""Returns True iff the unit is expressible in CSS, i.e., has no
denominator and at most one unit in the numerator.
"""
return len(self.unit_numer) <= 1 and not self.unit_denom
def is_simple_unit(self, unit):
    """Return True iff this number's unit is simple (no denominator, at
    most one numerator unit) and matches the given unit.
    """
    if self.unit_denom or len(self.unit_numer) > 1:
        return False

    if self.unit_numer:
        return self.unit_numer[0] == unit

    # Empty string historically means no unit
    return unit == ''
@property
def is_unitless(self):
    """True iff this number carries no units at all."""
    return not (self.unit_numer or self.unit_denom)
def render(self, compress=False):
    """Render this number as CSS text.

    Raises ValueError for compound units, which have no CSS syntax.
    """
    if not self.has_simple_unit:
        raise ValueError("Can't express compound units in CSS: %r" % (self,))

    unit = self.unit_numer[0] if self.unit_numer else ''

    value = self.value
    if compress and unit in ZEROABLE_UNITS and value == 0:
        return '0'

    if value == 0:  # -0.0 is plain 0
        value = 0

    val = ('%%0.0%df' % PRECISION) % round(value, PRECISION)
    val = val.rstrip('0').rstrip('.')

    if compress and val.startswith('0.'):
        # A leading zero before the decimal point is redundant
        val = val[1:]

    return val + unit
class List(Value):
    """A list of other values.  May be delimited by commas or spaces.

    Lists of one item don't make much sense in CSS, but can exist in Sass;
    use ``maybe()`` to unwrap them.

    Lists may also contain zero items, but these are forbidden from appearing
    in CSS output.
    """
    sass_type_name = 'list'

    def __init__(self, iterable, separator=None, use_comma=None, literal=False):
        if isinstance(iterable, List):
            iterable = iterable.value

        if (not isinstance(iterable, Iterable) or
                isinstance(iterable, six.string_types)):
            raise TypeError("Expected list, got %r" % (iterable,))

        self.value = list(iterable)

        for item in self.value:
            if not isinstance(item, Value):
                raise TypeError("Expected a Sass type, got %r" % (item,))

        # TODO remove separator argument entirely
        if use_comma is None:
            self.use_comma = separator == ","
        else:
            self.use_comma = use_comma

        self.literal = literal

    @classmethod
    def maybe_new(cls, values, use_comma=True):
        """If `values` contains only one item, return that item.  Otherwise,
        return a List as normal.
        """
        if len(values) == 1:
            return values[0]
        else:
            return cls(values, use_comma=use_comma)

    def maybe(self):
        """If this List contains only one item, return it.  Otherwise, return
        the List.
        """
        if len(self.value) == 1:
            return self.value[0]
        else:
            return self

    @classmethod
    def from_maybe(cls, values, use_comma=True):
        """Normalize a maybe-list: ``None`` becomes an empty list.

        NOTE(review): despite the historical docstring, this does NOT wrap a
        bare value in a List -- it returns `values` unchanged unless it is
        None.  Callers appear to depend on that, so the behavior is kept.
        """
        if values is None:
            values = []

        return values

    @classmethod
    def from_maybe_starargs(cls, args, use_comma=True):
        """If `args` has one element which appears to be a list, return it.
        Otherwise, return a list as normal.

        Mainly used by Sass function implementations that predate `...`
        support, so they can accept both a list of arguments and a single
        list stored in a variable.
        """
        if len(args) == 1:
            if isinstance(args[0], cls):
                return args[0]
            elif isinstance(args[0], (list, tuple)):
                return cls(args[0], use_comma=use_comma)

        return cls(args, use_comma=use_comma)

    def __repr__(self):
        return "<{0} {1}>".format(
            type(self).__name__,
            self.delimiter().join(repr(item) for item in self),
        )

    def __hash__(self):
        return hash((tuple(self.value), self.use_comma))

    def delimiter(self, compress=False):
        """Return the textual separator placed between rendered items."""
        if self.use_comma:
            if compress:
                return ','
            else:
                return ', '
        else:
            return ' '

    def __len__(self):
        return len(self.value)

    def __str__(self):
        return self.render()

    def __iter__(self):
        return iter(self.value)

    def __contains__(self, item):
        return item in self.value

    def __getitem__(self, key):
        return self.value[key]

    def to_pairs(self):
        """Interpret this list as key/value pairs (for map conversion);
        defer to the base class if any item is not a 2-item value."""
        pairs = []
        for item in self:
            if len(item) != 2:
                return super(List, self).to_pairs()

            pairs.append(tuple(item))

        return pairs

    def render(self, compress=False):
        """Render the items joined by this list's delimiter.  Empty lists
        cannot be rendered; all-null non-literal lists render as ''."""
        if not self.value:
            raise ValueError("Can't render empty list as CSS")

        delim = self.delimiter(compress)

        if self.literal:
            value = self.value
        else:
            # Non-literal lists have nulls stripped
            value = [item for item in self.value if not item.is_null]
            # Non-empty lists containing only nulls become nothing, just like
            # single nulls
            if not value:
                return ''

        return delim.join(
            item.render(compress=compress)
            for item in value
        )

    def render_interpolated(self, compress=False):
        return self.delimiter(compress).join(
            item.render_interpolated(compress) for item in self)

    # DEVIATION: binary ops on lists and scalars act element-wise
    def __add__(self, other):
        if isinstance(other, List):
            max_list, min_list = (self, other) if len(self) > len(other) else (other, self)
            return List([item + max_list[i] for i, item in enumerate(min_list)], use_comma=self.use_comma)
        elif isinstance(other, String):
            # UN-DEVIATION: adding a string should fall back to canonical
            # behavior of string addition
            return super(List, self).__add__(other)
        else:
            return List([item + other for item in self], use_comma=self.use_comma)

    def __sub__(self, other):
        if isinstance(other, List):
            max_list, min_list = (self, other) if len(self) > len(other) else (other, self)
            return List([item - max_list[i] for i, item in enumerate(min_list)], use_comma=self.use_comma)

        return List([item - other for item in self], use_comma=self.use_comma)

    def __mul__(self, other):
        if isinstance(other, List):
            # BUGFIX: this assignment was accidentally written twice; the
            # duplicate was removed with no behavioral change.
            max_list, min_list = (self, other) if len(self) > len(other) else (other, self)
            return List([item * max_list[i] for i, item in enumerate(min_list)], use_comma=self.use_comma)

        return List([item * other for item in self], use_comma=self.use_comma)

    def __div__(self, other):
        # NOTE(review): Python 3 dispatches `/` to __truediv__; presumably
        # the Value base class bridges this -- confirm before relying on it.
        if isinstance(other, List):
            max_list, min_list = (self, other) if len(self) > len(other) else (other, self)
            return List([item / max_list[i] for i, item in enumerate(min_list)], use_comma=self.use_comma)

        return List([item / other for item in self], use_comma=self.use_comma)

    def __pos__(self):
        return self

    def __neg__(self):
        return List([-item for item in self], use_comma=self.use_comma)
class Arglist(List):
    """An argument list.  Behaves like a List of the positional arguments,
    with keyword arguments tacked on separately; the keywords are only
    reachable from Python (or via the Sass `keywords` function).
    """
    sass_type_name = 'arglist'

    # Flipped to True once the keywords have been inspected
    keywords_retrieved = False

    def __init__(self, args, kwargs):
        super(Arglist, self).__init__(args, use_comma=True)
        self._kwargs = Map(kwargs)

    def extract_keywords(self):
        """Return the keyword arguments as a Map, marking them as seen."""
        self.keywords_retrieved = True
        return self._kwargs
def _constrain(value, lb=0, ub=1):
"""Helper for Color constructors. Constrains a value to a range."""
if value < lb:
return lb
elif value > ub:
return ub
else:
return value
class Color(Value):
    """An RGBA color.

    Channels are stored in ``self.value`` as a 4-tuple
    ``(red, green, blue, alpha)`` with the color channels on a 0-255 scale
    and alpha on a 0-1 scale.
    """
    sass_type_name = 'color'

    original_literal = None

    def __init__(self, tokens):
        self.tokens = tokens
        self.value = (0, 0, 0, 1)
        if tokens is None:
            self.value = (0, 0, 0, 1)
        elif isinstance(tokens, Color):
            self.value = tokens.value
        else:
            raise TypeError("Can't make Color from %r" % (tokens,))

    ### Alternate constructors

    @classmethod
    def from_rgb(cls, red, green, blue, alpha=1.0, original_literal=None):
        """Build a Color from 0-1 red/green/blue/alpha channels (values are
        clamped into range)."""
        red = _constrain(red)
        green = _constrain(green)
        blue = _constrain(blue)
        alpha = _constrain(alpha)

        self = cls.__new__(cls)  # TODO
        self.tokens = None
        # TODO really should store these things internally as 0-1, but can't
        # until stuff stops examining .value directly
        self.value = (red * 255.0, green * 255.0, blue * 255.0, alpha)

        if original_literal is not None:
            self.original_literal = original_literal

        return self

    @classmethod
    def from_hsl(cls, hue, saturation, lightness, alpha=1.0):
        """Build a Color from 0-1 hue/saturation/lightness/alpha channels."""
        hue = _constrain(hue)
        saturation = _constrain(saturation)
        lightness = _constrain(lightness)
        alpha = _constrain(alpha)

        r, g, b = colorsys.hls_to_rgb(hue, lightness, saturation)
        return cls.from_rgb(r, g, b, alpha)

    @classmethod
    def from_hex(cls, hex_string, literal=False):
        """Build a Color from a hex literal: #rgb, #rgba, #rrggbb or
        #rrggbbaa.  Raises ValueError for anything else."""
        if not hex_string.startswith('#'):
            raise ValueError("Expected #abcdef, got %r" % (hex_string,))

        if literal:
            original_literal = hex_string
        else:
            original_literal = None

        hex_string = hex_string[1:]

        # Always include the alpha channel
        if len(hex_string) == 3:
            hex_string += 'f'
        elif len(hex_string) == 6:
            hex_string += 'ff'

        # Now there should be only two possibilities.  Normalize to a list of
        # two hex digits
        if len(hex_string) == 4:
            chunks = [ch * 2 for ch in hex_string]
        elif len(hex_string) == 8:
            chunks = [
                hex_string[0:2], hex_string[2:4], hex_string[4:6], hex_string[6:8]
            ]
        else:
            # BUGFIX: any other length previously fell through and crashed
            # with a NameError on `chunks`; reject it explicitly instead.
            raise ValueError("Expected #abcdef, got %r" % ('#' + hex_string,))

        rgba = [int(ch, 16) / 255 for ch in chunks]
        return cls.from_rgb(*rgba, original_literal=original_literal)

    @classmethod
    def from_name(cls, name):
        """Build a Color from a CSS color name."""
        self = cls.__new__(cls)  # TODO
        self.original_literal = name

        r, g, b, a = COLOR_NAMES[name]

        self.value = r, g, b, a
        return self

    ### Accessors

    @property
    def rgb(self):
        """The (r, g, b) channels on a 0-255 scale."""
        # TODO: deprecate, relies on internals
        return tuple(self.value[:3])

    @property
    def rgba(self):
        """All four channels on a 0-1 scale."""
        return (
            self.value[0] / 255,
            self.value[1] / 255,
            self.value[2] / 255,
            self.value[3],
        )

    @property
    def hsl(self):
        """Hue, saturation, lightness on a 0-1 scale."""
        rgba = self.rgba
        h, l, s = colorsys.rgb_to_hls(*rgba[:3])
        return h, s, l

    @property
    def alpha(self):
        """The alpha channel, 0-1."""
        return self.value[3]

    @property
    def rgba255(self):
        """All four channels rounded to integers on a 0-255 scale."""
        return (
            int(self.value[0] * 1 + 0.5),
            int(self.value[1] * 1 + 0.5),
            int(self.value[2] * 1 + 0.5),
            int(self.value[3] * 255 + 0.5),
        )

    def __repr__(self):
        return "<{0} {1}>".format(type(self).__name__, self.render())

    def __hash__(self):
        return hash(self.value)

    def __eq__(self, other):
        if not isinstance(other, Color):
            return Boolean(False)

        # Scale channels to 255 and round to integers; this allows only 8-bit
        # color, but Ruby sass makes the same assumption, and otherwise it's
        # easy to get lots of float errors for HSL colors.
        left = tuple(round(n) for n in self.rgba255)
        right = tuple(round(n) for n in other.rgba255)
        return Boolean(left == right)

    def __add__(self, other):
        if isinstance(other, (Color, Number)):
            return self._operate(other, operator.add)
        else:
            return super(Color, self).__add__(other)

    def __sub__(self, other):
        if isinstance(other, (Color, Number)):
            return self._operate(other, operator.sub)
        else:
            return super(Color, self).__sub__(other)

    def __mul__(self, other):
        if isinstance(other, (Color, Number)):
            return self._operate(other, operator.mul)
        else:
            return super(Color, self).__mul__(other)

    def __div__(self, other):
        # NOTE(review): operator.div exists only on Python 2; on Python 3
        # this would raise AttributeError if reached.  Confirm how the Value
        # base class routes division before changing it.
        if isinstance(other, (Color, Number)):
            return self._operate(other, operator.div)
        else:
            return super(Color, self).__div__(other)

    def _operate(self, other, op):
        """Apply `op` channel-wise against a unitless Number or another
        Color whose alpha channel matches ours."""
        if isinstance(other, Number):
            if not other.is_unitless:
                raise ValueError("Expected unitless Number, got %r" % (other,))

            other_rgb = (other.value,) * 3
        elif isinstance(other, Color):
            if self.alpha != other.alpha:
                raise ValueError("Alpha channels must match between %r and %r"
                                 % (self, other))

            other_rgb = other.rgb
        else:
            raise TypeError("Expected Color or Number, got %r" % (other,))

        new_rgb = [
            min(255., max(0., op(left, right)))
            # for from_rgb
            / 255.
            for (left, right) in zip(self.rgb, other_rgb)
        ]

        return Color.from_rgb(*new_rgb, alpha=self.alpha)

    def render(self, compress=False):
        """Return a rendered representation of the color.  If `compress` is
        true, the shortest possible representation is used; otherwise, named
        colors are rendered as names and all others are rendered as hex (or
        with the rgba function).
        """
        if not compress and self.original_literal:
            return self.original_literal

        candidates = []

        # TODO this assumes CSS resolution is 8-bit per channel, but so does
        # Ruby.
        r, g, b, a = self.value
        r, g, b = int(round(r)), int(round(g)), int(round(b))

        # Build a candidate list in order of preference.  If `compress` is
        # True, the shortest candidate is used; otherwise, the first candidate
        # is used.

        # Try color name
        key = r, g, b, a
        if key in COLOR_LOOKUP:
            candidates.append(COLOR_LOOKUP[key])

        if a == 1:
            # Hex is always shorter than function notation
            if all(ch % 17 == 0 for ch in (r, g, b)):
                candidates.append("#%1x%1x%1x" % (r // 17, g // 17, b // 17))
            else:
                candidates.append("#%02x%02x%02x" % (r, g, b))
        else:
            # Can't use hex notation for RGBA
            if compress:
                sp = ''
            else:
                sp = ' '
            candidates.append("rgba(%d,%s%d,%s%d,%s%.2g)" % (r, sp, g, sp, b, sp, a))

        if compress:
            return min(candidates, key=len)
        else:
            return candidates[0]
# TODO be unicode-clean and delete this nonsense
# Encoding assumed when a byte string sneaks in where text is expected
# (see String.__init__ below).
DEFAULT_STRING_ENCODING = "utf8"
class String(Value):
    """Represents both CSS quoted string values and CSS identifiers (such as
    `left`).

    Makes no distinction between single and double quotes, except that the same
    quotes are preserved on string literals that pass through unmodified.
    Otherwise, double quotes are used.
    """
    sass_type_name = 'string'

    # Matches any character that must be escaped when rendering a bareword
    # identifier (anything outside hyphen, underscore, ASCII letters, and
    # non-ASCII codepoints).
    bad_identifier_rx = re.compile('[^-_a-zA-Z\x80-\U0010FFFF]')

    def __init__(self, value, quotes='"', literal=False):
        if isinstance(value, String):
            # TODO unclear if this should be here, but many functions rely on
            # it
            value = value.value
        elif isinstance(value, Number):
            # TODO this may only be necessary in the case of __radd__ and
            # number values
            value = six.text_type(value)

        if isinstance(value, six.binary_type):
            # Bytes are tolerated for now but deprecated; decode with the
            # module-wide default encoding.
            warn(FutureWarning(
                "String got a bytes type {0!r} "
                "-- this will no longer be supported in pyScss 2.0"
                .format(value)
            ))
            value = value.decode(DEFAULT_STRING_ENCODING)

        if not isinstance(value, six.text_type):
            raise TypeError("Expected string, got {0!r}".format(value))

        # The unquoted text of the string; quotes is the quote character
        # (or None for a bareword/identifier).
        self.value = value
        self.quotes = quotes

        # TODO this isn't quite used yet
        if literal:
            self.original_literal = value
        else:
            self.original_literal = None

    @classmethod
    def unquoted(cls, value, literal=False):
        """Helper to create a string with no quotes."""
        return cls(value, quotes=None, literal=literal)

    def __hash__(self):
        return hash(self.value)

    def __repr__(self):
        if self.quotes:
            quotes = '(' + self.quotes + ')'
        else:
            quotes = ''
        return "<{0}{1} {2!r}>".format(
            type(self).__name__, quotes, self.value)

    def __eq__(self, other):
        # Quoting style is deliberately ignored for equality.
        return Boolean(isinstance(other, String) and self.value == other.value)

    def __add__(self, other):
        # Concatenation: a non-String operand contributes its rendered form.
        if isinstance(other, String):
            other_value = other.value
        else:
            other_value = other.render()

        return String(
            self.value + other_value,
            quotes='"' if self.quotes else None)

    def __mul__(self, other):
        # DEVIATION: Ruby Sass doesn't do this, because Ruby doesn't.  But
        # Python does, and in Ruby Sass it's just fatal anyway.
        if not isinstance(other, Number):
            return super(String, self).__mul__(other)

        if not other.is_unitless:
            raise TypeError("Can only multiply strings by unitless numbers")

        n = other.value
        if n != int(n):
            raise ValueError("Can only multiply strings by integers")

        return String(self.value * int(other.value), quotes=self.quotes)

    def _escape_character(self, match):
        """Given a single character, return it appropriately CSS-escaped."""
        # TODO is there any case where we'd want to use unicode escaping?
        # TODO unsure if this works with newlines
        return '\\' + match.group(0)

    def _is_name_start(self, ch):
        """Return True if `ch` may start a CSS identifier unescaped:
        underscore, ASCII letter, or any non-ASCII codepoint."""
        if ch == '_':
            return True

        if ord(ch) >= 128:
            return True

        if ch in string.ascii_letters:
            return True

        return False

    def render(self, compress=False):
        # TODO should preserve original literals here too -- even the quotes.
        # or at least that's what sass does.
        # Escape and add quotes as appropriate.
        if self.quotes is None:
            # If you deliberately construct a bareword with bogus CSS in it,
            # you're assumed to know what you're doing
            return self.value
        else:
            return self._render_quoted()

    def render_interpolated(self, compress=False):
        # Always render without quotes
        return self.value

    def _render_bareword(self):
        # TODO this is currently unused, and only implemented due to an
        # oversight, but would make for a much better implementation of
        # escape()

        # This is a bareword, so almost anything outside \w needs escaping
        ret = self.value
        ret = self.bad_identifier_rx.sub(self._escape_character, ret)

        # Also apply some minor quibbling rules about how barewords can
        # start: with a "name start", an escape, a hyphen followed by one
        # of those, or two hyphens.
        if not ret:
            # TODO is an unquoted empty string allowed to be rendered?
            pass
        elif ret[0] == '-':
            # NOTE(review): if `ret` is exactly "-", ret[1] raises IndexError
            # here.  The method is unused today, but fix before wiring it up.
            if ret[1] in '-\\' or self._is_name_start(ret[1]):
                pass
            else:
                # Escape the second character
                # TODO what if it's a digit, oops
                ret = ret[0] + '\\' + ret[1:]
        elif ret[0] == '\\' or self._is_name_start(ret[0]):
            pass
        else:
            # Escape the first character
            # TODO what if it's a digit, oops
            ret = '\\' + ret

        return ret

    def _render_quoted(self):
        # Strictly speaking, the only things we need to quote are the quotes
        # themselves, backslashes, and newlines.
        # TODO Ruby Sass takes backslashes in barewords literally, but treats
        # backslashes in quoted strings as escapes -- their mistake?
        # TODO In Ruby Sass, generated strings never have single quotes -- but
        # neither do variable interpolations, so I'm not sure what they're
        # doing
        quote = self.quotes
        ret = self.value
        ret = ret.replace('\\', '\\\\')
        ret = ret.replace(quote, '\\' + quote)
        # Note that a literal newline is ignored when escaped, so we have to
        # use the codepoint instead.  But we'll leave the newline as well, to
        # aid readability.
        ret = ret.replace('\n', '\\a\\\n')
        return quote + ret + quote
# TODO this needs to pretend the url(...) is part of the string for all string
# operations -- even the quotes!  alas.
# TODO recasting a function to a String will lose the function part?  whoops.
# maybe .value should just be, uh, the literal value instead of the insides???
class Function(String):
    """Pseudo-type for a function call, which crops up frequently in CSS as
    a string marker.  Acts mostly like a string, but carries a function name
    and renders wrapped in parentheses after it.
    """
    def __init__(self, string, function_name, quotes='"', literal=False):
        self.function_name = function_name
        super(Function, self).__init__(string, quotes=quotes, literal=literal)

    def render(self, compress=False):
        inner = super(Function, self).render(compress)
        return "{0}({1})".format(self.function_name, inner)

    def render_interpolated(self, compress=False):
        inner = super(Function, self).render_interpolated(compress)
        return "{0}({1})".format(self.function_name, inner)
class Url(Function):
    """A `url(...)` value.

    Bare URLs may not contain quotes, parentheses, or unprintables; quoted
    URLs may, of course, contain whatever they like.
    Ref: http://dev.w3.org/csswg/css-syntax-3/#consume-a-url-token0
    """
    bad_identifier_rx = re.compile("[$'\"()\\x00-\\x08\\x0b\\x0e-\\x1f\\x7f]")

    def __init__(self, string, **kwargs):
        super(Url, self).__init__(string, 'url', **kwargs)

    def render(self, compress=False):
        if self.quotes is None:
            return self.render_interpolated(compress)
        return "url(" + self._render_quoted() + ")"

    def render_interpolated(self, compress=False):
        # Render without quotes, escaping anything that would make the
        # result invalid CSS.
        escaped = self.bad_identifier_rx.sub(self._escape_character, self.value)
        return "url(" + escaped + ")"
class Map(Value):
    """An ordered mapping of Sass values to Sass values, stored as pairs."""
    sass_type_name = 'map'

    def __init__(self, pairs, index=None):
        # `pairs` preserves order; `index` gives O(1) lookup by key.
        self.pairs = tuple(pairs)

        if index is None:
            # NOTE: the index is built from the original argument, matching
            # the historical behavior exactly.
            self.index = dict(pairs)
        else:
            self.index = index

    def __repr__(self):
        return "<Map: (%s)>" % (", ".join("%s: %s" % pair for pair in self.pairs),)

    def __hash__(self):
        return hash(self.pairs)

    def __len__(self):
        return len(self.pairs)

    def __iter__(self):
        return iter(self.pairs)

    def __getitem__(self, index):
        # A single map entry behaves as a two-element comma list.
        return List(self.pairs[index], use_comma=True)

    def __eq__(self, other):
        try:
            return self.pairs == other.to_pairs()
        except ValueError:
            return NotImplemented

    def to_dict(self):
        return self.index

    def to_pairs(self):
        return self.pairs

    def render(self, compress=False):
        raise TypeError("Cannot render map %r as CSS" % (self,))
def expect_type(value, types, unit=any):
    """Check that `value` is an instance of `types` (a Sass Value subclass
    or a tuple of them), raising TypeError with a readable English list of
    the acceptable Sass type names otherwise.

    If `unit` is given (i.e. not the default sentinel ``any``) and `value`
    is a Number, additionally require it to be unitless (``unit=None``) or
    unitless-or-percentage (``unit='%'``), raising ValueError otherwise.
    """
    if not isinstance(value, types):
        if isinstance(types, type):
            # BUGFIX: this used to build `(type,)` -- wrapping the *builtin*
            # `type` instead of the single class that was passed in, which
            # then crashed on the sass_type_name lookup below.
            types = (types,)
        sass_type_names = list(set(t.sass_type_name for t in types))
        sass_type_names.sort()

        # Join with commas in English fashion
        if len(sass_type_names) == 1:
            sass_type = sass_type_names[0]
        elif len(sass_type_names) == 2:
            sass_type = ' or '.join(sass_type_names)
        else:
            sass_type = ', '.join(sass_type_names[:-1])
            sass_type += ', or ' + sass_type_names[-1]

        raise TypeError("Expected %s, got %r" % (sass_type, value))

    if unit is not any and isinstance(value, Number):
        if unit is None and not value.is_unitless:
            raise ValueError("Expected unitless number, got %r" % (value,))

        elif unit == '%' and not (
                value.is_unitless or value.is_simple_unit('%')):
            raise ValueError(
                "Expected unitless number or percentage, got %r" % (value,))
| |
import os
import warnings
from datetime import date
from datetime import datetime
from os.path import isfile
from os.path import join
from os.path import samefile
from .config import Configuration
from .scm_workdir import Workdir
from .utils import do_ex
from .utils import require_command
from .utils import trace
from .version import meta
# If testing command in shell make sure to quote the match argument like
# '*[0-9]*' as it will expand before being sent to git if there are any matching
# files in current directory.
#
# --dirty appends "-dirty" when the worktree has local changes; --long always
# emits "<tag>-<distance>-g<node>" (parsed by _git_parse_describe below);
# --match restricts candidate tags to ones containing a digit.
DEFAULT_DESCRIBE = [
    "git",
    "describe",
    "--dirty",
    "--tags",
    "--long",
    "--match",
    "*[0-9]*",
]
class GitWorkdir(Workdir):
    """experimental, may change at any time"""

    COMMAND = "git"

    @classmethod
    def from_potential_worktree(cls, wd):
        """Return a GitWorkdir for *wd* if it is the root of a git worktree,
        else None (also None when *wd* is merely a subdirectory of one)."""
        require_command(cls.COMMAND)
        wd = os.path.abspath(wd)
        # --show-prefix prints wd's path relative to the repository root
        real_wd, _, ret = do_ex("git rev-parse --show-prefix", wd)
        real_wd = real_wd[:-1]  # remove the trailing pathsep
        if ret:
            # Not inside a git repository at all
            return
        if not real_wd:
            real_wd = wd
        else:
            assert wd.replace("\\", "/").endswith(real_wd)
            # In windows wd contains ``\`` which should be replaced by ``/``
            # for this assertion to work. Length of string isn't changed by replace
            # ``\\`` is just and escape for `\`
            real_wd = wd[: -len(real_wd)]
        trace("real root", real_wd)
        if not samefile(real_wd, wd):
            # wd is a subdirectory of the worktree, not its root
            return

        return cls(real_wd)

    def is_dirty(self):
        """True when tracked files have uncommitted changes."""
        out, _, _ = self.do_ex("git status --porcelain --untracked-files=no")
        return bool(out)

    def get_branch(self):
        """Return the current branch name, or None if it can't be determined
        (e.g. detached HEAD on old git versions)."""
        branch, err, ret = self.do_ex("git rev-parse --abbrev-ref HEAD")
        if ret:
            trace("branch err", branch, err, ret)
            # Fall back for git versions without --abbrev-ref support
            branch, err, ret = self.do_ex("git symbolic-ref --short HEAD")
            if ret:
                trace("branch err (symbolic-ref)", branch, err, ret)
                branch = None
        return branch

    def get_head_date(self):
        """Return the committer date of HEAD as a ``datetime.date``, or None
        when it cannot be determined."""
        timestamp, err, ret = self.do_ex("git log -n 1 HEAD --format=%cI")
        if ret:
            trace("timestamp err", timestamp, err, ret)
            return
        # TODO, when dropping python3.6 use fromiso
        date_part = timestamp.split("T")[0]
        if "%c" in date_part:
            # Old git doesn't understand %cI and echoes the format back
            trace("git too old -> timestamp is ", timestamp)
            return None
        return datetime.strptime(date_part, r"%Y-%m-%d").date()

    def is_shallow(self):
        """True when this is a shallow clone (history is truncated)."""
        return isfile(join(self.path, ".git/shallow"))

    def fetch_shallow(self):
        """Fetch the full history to convert a shallow clone into a
        complete one."""
        self.do_ex("git fetch --unshallow")

    def node(self):
        """Return the abbreviated (7-char) HEAD commit hash, or None when
        there is no commit yet."""
        node, _, ret = self.do_ex("git rev-parse --verify --quiet HEAD")
        if not ret:
            return node[:7]

    def count_all_nodes(self):
        """Return the number of commits reachable from HEAD."""
        revs, _, _ = self.do_ex("git rev-list HEAD")
        return revs.count("\n") + 1

    def default_describe(self):
        """Run the default `git describe` command (see DEFAULT_DESCRIBE)."""
        return self.do_ex(DEFAULT_DESCRIBE)
def warn_on_shallow(wd):
    """experimental, may change at any time"""
    if not wd.is_shallow():
        return
    warnings.warn(f'"{wd.path}" is shallow and may cause errors')
def fetch_on_shallow(wd):
    """experimental, may change at any time"""
    if not wd.is_shallow():
        return
    warnings.warn(f'"{wd.path}" was shallow, git fetch was used to rectify')
    wd.fetch_shallow()
def fail_on_shallow(wd):
    """experimental, may change at any time"""
    if not wd.is_shallow():
        return
    raise ValueError(
        f'{wd.path} is shallow, please correct with "git fetch --unshallow"'
    )
def get_working_directory(config):
    """Return the working directory as a ``GitWorkdir`` (or ``None``).

    An explicit parent directory wins; otherwise optionally search upward
    from the configured root.
    """
    if config.parent:
        target = config.parent
    elif config.search_parent_directories:
        return search_parent(config.absolute_root)
    else:
        target = config.absolute_root
    return GitWorkdir.from_potential_worktree(target)
def parse(root, describe_command=None, pre_parse=warn_on_shallow, config=None):
    """
    :param pre_parse: experimental pre_parse action, may change at any time
    """
    config = config or Configuration(root=root)

    wd = get_working_directory(config)
    if not wd:
        # Not a git worktree; let another SCM backend handle it
        return
    return _git_parse_inner(
        config, wd, describe_command=describe_command, pre_parse=pre_parse
    )
def _git_parse_inner(config, wd, pre_parse=None, describe_command=None):
    """Derive version metadata from the git worktree *wd*.

    Runs the optional *pre_parse* hook, then `git describe` (the configured
    command wins over the argument, which wins over the default), and feeds
    the result to ``meta``.
    """
    if pre_parse:
        pre_parse(wd)

    # Config-level override takes precedence over the argument
    if config.git_describe_command is not None:
        describe_command = config.git_describe_command

    if describe_command is not None:
        out, _, ret = wd.do_ex(describe_command)
    else:
        out, _, ret = wd.default_describe()

    if ret == 0:
        tag, distance, node, dirty = _git_parse_describe(out)
        if distance == 0 and not dirty:
            # Exactly on a tag with a clean tree: report no distance
            distance = None
    else:
        # If the describe command failed, try to get the information otherwise.
        tag = "0.0"
        node = wd.node()
        if node is None:
            # Repository without any commit yet
            distance = 0
        else:
            distance = wd.count_all_nodes()
            node = "g" + node
        dirty = wd.is_dirty()

    branch = wd.get_branch()
    # Fall back to today when the HEAD date cannot be determined
    node_date = wd.get_head_date() or date.today()

    return meta(
        tag,
        branch=branch,
        node=node,
        node_date=node_date,
        distance=distance,
        dirty=dirty,
        config=config,
    )
def _git_parse_describe(describe_output):
# 'describe_output' looks e.g. like 'v1.5.0-0-g4060507' or
# 'v1.15.1rc1-37-g9bd1298-dirty'.
if describe_output.endswith("-dirty"):
dirty = True
describe_output = describe_output[:-6]
else:
dirty = False
tag, number, node = describe_output.rsplit("-", 2)
number = int(number)
return tag, number, node, dirty
def search_parent(dirname):
    """Walk up from *dirname* looking for a git worktree root.

    Returns a ``GitWorkdir`` for the first ancestor that is one, or ``None``
    when the filesystem root is reached without a match.

    Code based on:
    https://github.com/gitpython-developers/GitPython/blob/main/git/repo/base.py
    """
    current = os.path.abspath(dirname)
    while current:
        workdir = None
        try:
            workdir = GitWorkdir.from_potential_worktree(current)
        except Exception:
            pass
        if workdir is not None:
            return workdir
        current, tail = os.path.split(current)
        if not tail:
            # Reached the filesystem root
            return None
| |
"""llvm
Tool-specific initialization for LLVM
"""
#
# Copyright (c) 2009 VMware, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import os.path
import re
import sys
import distutils.version
import SCons.Errors
import SCons.Util
def generate(env):
    """Configure the SCons *env* for building against LLVM (Python 2 tool).

    On success sets env['llvm'] = True, records LLVM_VERSION, and defines
    the HAVE_LLVM macro; on any failure it returns early leaving
    env['llvm'] = False.
    """
    env['llvm'] = False

    try:
        llvm_dir = os.environ['LLVM']
    except KeyError:
        # Do nothing -- use the system headers/libs
        llvm_dir = None
    else:
        if not os.path.isdir(llvm_dir):
            raise SCons.Errors.InternalError, "Specified LLVM directory not found"

        if env['debug']:
            llvm_subdir = 'Debug'
        else:
            llvm_subdir = 'Release'

        # Prefer the per-configuration bin dir; fall back to the plain one
        llvm_bin_dir = os.path.join(llvm_dir, llvm_subdir, 'bin')
        if not os.path.isdir(llvm_bin_dir):
            llvm_bin_dir = os.path.join(llvm_dir, 'bin')
            if not os.path.isdir(llvm_bin_dir):
                raise SCons.Errors.InternalError, "LLVM binary directory not found"

        env.PrependENVPath('PATH', llvm_bin_dir)

    if env['platform'] == 'windows':
        # XXX: There is no llvm-config on Windows, so assume a standard layout
        if llvm_dir is None:
            print 'scons: LLVM environment variable must be specified when building for windows'
            return

        # Try to determine the LLVM version from llvm/Config/config.h
        llvm_config = os.path.join(llvm_dir, 'include/llvm/Config/config.h')
        if not os.path.exists(llvm_config):
            print 'scons: could not find %s' % llvm_config
            return
        llvm_version_re = re.compile(r'^#define PACKAGE_VERSION "([^"]*)"')
        llvm_version = None
        for line in open(llvm_config, 'rt'):
            mo = llvm_version_re.match(line)
            if mo:
                llvm_version = mo.group(1)
                llvm_version = distutils.version.LooseVersion(llvm_version)
                break
        if llvm_version is None:
            print 'scons: could not determine the LLVM version from %s' % llvm_config
            return

        env.Prepend(CPPPATH = [os.path.join(llvm_dir, 'include')])
        env.AppendUnique(CPPDEFINES = [
            '__STDC_LIMIT_MACROS',
            '__STDC_CONSTANT_MACROS',
            'HAVE_STDINT_H',
        ])
        env.Prepend(LIBPATH = [os.path.join(llvm_dir, 'lib')])
        # The static-library split differs per LLVM release; pick the list
        # matching the detected version, newest first.
        if llvm_version >= distutils.version.LooseVersion('3.2'):
            # 3.2
            env.Prepend(LIBS = [
                'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
                'LLVMX86CodeGen', 'LLVMX86Desc', 'LLVMSelectionDAG',
                'LLVMAsmPrinter', 'LLVMMCParser', 'LLVMX86AsmPrinter',
                'LLVMX86Utils', 'LLVMX86Info', 'LLVMJIT',
                'LLVMExecutionEngine', 'LLVMCodeGen', 'LLVMScalarOpts',
                'LLVMInstCombine', 'LLVMTransformUtils', 'LLVMipa',
                'LLVMAnalysis', 'LLVMTarget', 'LLVMMC', 'LLVMCore',
                'LLVMSupport', 'LLVMRuntimeDyld', 'LLVMObject'
            ])
        elif llvm_version >= distutils.version.LooseVersion('3.0'):
            # 3.0
            env.Prepend(LIBS = [
                'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
                'LLVMX86CodeGen', 'LLVMX86Desc', 'LLVMSelectionDAG',
                'LLVMAsmPrinter', 'LLVMMCParser', 'LLVMX86AsmPrinter',
                'LLVMX86Utils', 'LLVMX86Info', 'LLVMJIT',
                'LLVMExecutionEngine', 'LLVMCodeGen', 'LLVMScalarOpts',
                'LLVMInstCombine', 'LLVMTransformUtils', 'LLVMipa',
                'LLVMAnalysis', 'LLVMTarget', 'LLVMMC', 'LLVMCore',
                'LLVMSupport'
            ])
        elif llvm_version >= distutils.version.LooseVersion('2.9'):
            # 2.9
            env.Prepend(LIBS = [
                'LLVMObject', 'LLVMMCJIT', 'LLVMMCDisassembler',
                'LLVMLinker', 'LLVMipo', 'LLVMInterpreter',
                'LLVMInstrumentation', 'LLVMJIT', 'LLVMExecutionEngine',
                'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
                'LLVMMCParser', 'LLVMX86AsmPrinter', 'LLVMX86CodeGen',
                'LLVMSelectionDAG', 'LLVMX86Utils', 'LLVMX86Info', 'LLVMAsmPrinter',
                'LLVMCodeGen', 'LLVMScalarOpts', 'LLVMInstCombine',
                'LLVMTransformUtils', 'LLVMipa', 'LLVMAsmParser',
                'LLVMArchive', 'LLVMBitReader', 'LLVMAnalysis', 'LLVMTarget',
                'LLVMCore', 'LLVMMC', 'LLVMSupport',
            ])
        elif llvm_version >= distutils.version.LooseVersion('2.7'):
            # 2.7
            env.Prepend(LIBS = [
                'LLVMLinker', 'LLVMipo', 'LLVMInterpreter',
                'LLVMInstrumentation', 'LLVMJIT', 'LLVMExecutionEngine',
                'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
                'LLVMMCParser', 'LLVMX86AsmPrinter', 'LLVMX86CodeGen',
                'LLVMSelectionDAG', 'LLVMX86Info', 'LLVMAsmPrinter',
                'LLVMCodeGen', 'LLVMScalarOpts', 'LLVMInstCombine',
                'LLVMTransformUtils', 'LLVMipa', 'LLVMAsmParser',
                'LLVMArchive', 'LLVMBitReader', 'LLVMAnalysis', 'LLVMTarget',
                'LLVMMC', 'LLVMCore', 'LLVMSupport', 'LLVMSystem',
            ])
        else:
            # 2.6
            env.Prepend(LIBS = [
                'LLVMX86AsmParser', 'LLVMX86AsmPrinter', 'LLVMX86CodeGen',
                'LLVMX86Info', 'LLVMLinker', 'LLVMipo', 'LLVMInterpreter',
                'LLVMInstrumentation', 'LLVMJIT', 'LLVMExecutionEngine',
                'LLVMDebugger', 'LLVMBitWriter', 'LLVMAsmParser',
                'LLVMArchive', 'LLVMBitReader', 'LLVMSelectionDAG',
                'LLVMAsmPrinter', 'LLVMCodeGen', 'LLVMScalarOpts',
                'LLVMTransformUtils', 'LLVMipa', 'LLVMAnalysis',
                'LLVMTarget', 'LLVMMC', 'LLVMCore', 'LLVMSupport',
                'LLVMSystem',
            ])
        # Win32 system libraries LLVM depends on
        env.Append(LIBS = [
            'imagehlp',
            'psapi',
            'shell32',
            'advapi32'
        ])
        if env['msvc']:
            # Some of the LLVM C headers use the inline keyword without
            # defining it.
            env.Append(CPPDEFINES = [('inline', '__inline')])
            if env['build'] in ('debug', 'checked'):
                # LLVM libraries are static, build with /MT, and they
                # automatically link agains LIBCMT. When we're doing a
                # debug build we'll be linking against LIBCMTD, so disable
                # that.
                env.Append(LINKFLAGS = ['/nodefaultlib:LIBCMT'])
    else:
        # Non-Windows: rely on the llvm-config script for everything
        if not env.Detect('llvm-config'):
            print 'scons: llvm-config script not found'
            return

        llvm_version = env.backtick('llvm-config --version').rstrip()
        llvm_version = distutils.version.LooseVersion(llvm_version)

        try:
            # Treat --cppflags specially to prevent NDEBUG from disabling
            # assertion failures in debug builds.
            cppflags = env.ParseFlags('!llvm-config --cppflags')
            try:
                cppflags['CPPDEFINES'].remove('NDEBUG')
            except ValueError:
                pass
            env.MergeFlags(cppflags)

            components = ['engine', 'bitwriter', 'x86asmprinter']

            if llvm_version >= distutils.version.LooseVersion('3.1'):
                components.append('mcjit')

            if llvm_version >= distutils.version.LooseVersion('3.2'):
                env.Append(CXXFLAGS = ('-fno-rtti',))

            env.ParseConfig('llvm-config --libs ' + ' '.join(components))
            env.ParseConfig('llvm-config --ldflags')
        except OSError:
            print 'scons: llvm-config version %s failed' % llvm_version
            return

    assert llvm_version is not None
    env['llvm'] = True
    print 'scons: Found LLVM version %s' % llvm_version
    env['LLVM_VERSION'] = llvm_version

    # Define HAVE_LLVM macro with the major/minor version number (e.g., 0x0206 for 2.6)
    llvm_version_major = int(llvm_version.version[0])
    llvm_version_minor = int(llvm_version.version[1])
    llvm_version_hex = '0x%02x%02x' % (llvm_version_major, llvm_version_minor)
    env.Prepend(CPPDEFINES = [('HAVE_LLVM', llvm_version_hex)])
def exists(env):
    """SCons tool hook: report that this tool can always be applied."""
    return True
# vim:set ts=4 sw=4 et:
| |
# Copyright (c) 2017-2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from cloudify.state import current_ctx
from cloudify.exceptions import NonRecoverableError
from cloudify_rest_client.exceptions import CloudifyClientError
from .client_mock import MockCloudifyRestClient
from .base import DeploymentProxyTestBase
from ..tasks import create_deployment, delete_deployment
from cloudify_deployment_proxy import DeploymentProxyBase
# Shared MagicMock that raises CloudifyClientError('Mistake') whenever it is
# called; patched over rest-client methods to simulate server-side failures.
REST_CLIENT_EXCEPTION = \
    mock.MagicMock(side_effect=CloudifyClientError('Mistake'))
class TestDeployment(DeploymentProxyTestBase):
    """Unit tests for the create/delete deployment proxy tasks.

    All REST traffic is mocked: ``cloudify.manager.get_rest_client`` is
    patched inside each test, while ``time.sleep`` and the rest-client
    pagination properties are patched for the whole test in ``setUp``.
    """

    # Handle to the class-wide time.sleep patcher so tearDown can stop it.
    sleep_mock = None

    def setUp(self):
        """Patch time.sleep and rest-client pagination for every test."""
        super(TestDeployment, self).setUp()
        mock_sleep = mock.MagicMock()
        self.sleep_mock = mock.patch('time.sleep', mock_sleep)
        self.sleep_mock.start()
        self.total_patch = \
            mock.patch('cloudify_rest_client.responses.Pagination.total',
                       new_callable=mock.PropertyMock)
        self.total_patch = self.total_patch.start()
        self.total_patch.return_value = 1
        self.offset_patch = \
            mock.patch('cloudify_rest_client.responses.Pagination.offset',
                       new_callable=mock.PropertyMock)
        self.offset_patch = self.offset_patch.start()
        self.offset_patch.return_value = 1

    def tearDown(self):
        """Stop every patcher started in setUp."""
        if self.sleep_mock:
            self.sleep_mock.stop()
            self.sleep_mock = None
        self.offset_patch.stop()
        self.total_patch.stop()
        super(TestDeployment, self).tearDown()

    def test_delete_deployment_rest_client_error(self):
        """delete_deployment raises NonRecoverableError on rest-client error."""
        test_name = 'test_delete_deployment_rest_client_error'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        _ctx.instance.runtime_properties['deployment'] = {}
        _ctx.instance.runtime_properties['deployment']['id'] = test_name
        # Tests that deployments delete fails on rest client error
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            cfy_mock_client.deployments.delete = REST_CLIENT_EXCEPTION
            mock_client.return_value = cfy_mock_client
            error = self.assertRaises(NonRecoverableError,
                                      delete_deployment,
                                      deployment_id=test_name,
                                      timeout=.01)
            self.assertIn('action delete failed',
                          error.message)

    def test_upload_plugins(self):
        """_upload_plugins handles empty, dict and list plugin specs,
        and rejects malformed specs with NonRecoverableError."""
        # Tests that deployments upload plugins
        test_name = 'test_delete_deployment_success'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        get_local_path = mock.Mock(return_value="some_path")
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            plugin = mock.Mock()
            plugin.id = "CustomPlugin"
            cfy_mock_client = MockCloudifyRestClient()
            cfy_mock_client.plugins.upload = mock.Mock(return_value=plugin)
            mock_client.return_value = cfy_mock_client
            with mock.patch(
                'cloudify_deployment_proxy.get_local_path',
                get_local_path
            ):
                zip_files = mock.Mock(return_value="_zip")
                with mock.patch(
                    'cloudify_deployment_proxy.zip_files',
                    zip_files
                ):
                    # empty plugins: nothing should be fetched or zipped
                    deployment = DeploymentProxyBase({'plugins': []})
                    deployment._upload_plugins()
                    zip_files.assert_not_called()
                    get_local_path.assert_not_called()
                    # dict of plugins
                    deployment = DeploymentProxyBase({'plugins': {
                        'base_plugin': {
                            'wagon_path': '_wagon_path',
                            'plugin_yaml_path': '_plugin_yaml_path'}}})
                    os_mock = mock.Mock()
                    with mock.patch('cloudify_deployment_proxy.os', os_mock):
                        deployment._upload_plugins()
                    zip_files.assert_called_with(["some_path", "some_path"])
                    get_local_path.assert_has_calls([
                        mock.call('_wagon_path', create_temp=True),
                        mock.call('_plugin_yaml_path', create_temp=True)])
                    # temp files and the zip are cleaned up afterwards
                    os_mock.remove.assert_has_calls([
                        mock.call('some_path'),
                        mock.call('some_path'),
                        mock.call('_zip')])
            # fresh mocks for the list-shaped spec run
            get_local_path = mock.Mock(return_value="some_path")
            zip_files = mock.Mock(return_value="_zip")
            with mock.patch(
                'cloudify_deployment_proxy.get_local_path',
                get_local_path
            ):
                zip_files = mock.Mock(return_value="_zip")
                with mock.patch(
                    'cloudify_deployment_proxy.zip_files',
                    zip_files
                ):
                    # list of plugins
                    deployment = DeploymentProxyBase({'plugins': [{
                        'wagon_path': '_wagon_path',
                        'plugin_yaml_path': '_plugin_yaml_path'}]})
                    os_mock = mock.Mock()
                    with mock.patch('cloudify_deployment_proxy.os', os_mock):
                        deployment._upload_plugins()
                    zip_files.assert_called_with(["some_path", "some_path"])
                    get_local_path.assert_has_calls([
                        mock.call('_wagon_path', create_temp=True),
                        mock.call('_plugin_yaml_path', create_temp=True)])
                    os_mock.remove.assert_has_calls([
                        mock.call('some_path'),
                        mock.call('some_path'),
                        mock.call('_zip')])
                    # raise error if wrong plugins list
                    deployment = DeploymentProxyBase({'plugins': True})
                    error = self.assertRaises(NonRecoverableError,
                                              deployment._upload_plugins)
                    self.assertIn('Wrong type in plugins: True',
                                  error.message)
                    # raise error if wrong wagon/yaml values
                    deployment = DeploymentProxyBase({'plugins': [{
                        'wagon_path': '',
                        'plugin_yaml_path': ''}]})
                    error = self.assertRaises(NonRecoverableError,
                                              deployment._upload_plugins)
                    self.assertIn("You should provide both values wagon_path: '' "
                                  "and plugin_yaml_path: ''", error.message)

    def test_delete_deployment_success(self):
        """Successful delete also removes recorded secrets and plugins."""
        # Tests that deployments delete succeeds
        test_name = 'test_delete_deployment_success'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        _ctx.instance.runtime_properties['deployment'] = {}
        _ctx.instance.runtime_properties['deployment']['id'] = test_name
        _ctx.instance.runtime_properties['secrets'] = {'a': 'b'}
        _ctx.instance.runtime_properties['plugins'] = ['plugin_id']
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            cfy_mock_client.secrets.delete = mock.Mock()
            cfy_mock_client.plugins.delete = mock.Mock()
            mock_client.return_value = cfy_mock_client
            poll_with_timeout_test = \
                'cloudify_deployment_proxy.polling.poll_with_timeout'
            with mock.patch(poll_with_timeout_test) as poll:
                poll.return_value = True
                output = delete_deployment(
                    operation='delete_deployment',
                    deployment_id='test_deployments_delete',
                    timeout=.001)
                self.assertTrue(output)
                cfy_mock_client.secrets.delete.assert_called_with(key='a')
                cfy_mock_client.plugins.delete.assert_called_with(
                    plugin_id='plugin_id')

    def test_delete_deployment_any_dep_by_id(self):
        """delete_deployment succeeds when looking the deployment up by id."""
        # Tests that deployments runs any_dep_by_id
        test_name = 'test_delete_deployment_any_dep_by_id'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        _ctx.instance.runtime_properties['deployment'] = {}
        _ctx.instance.runtime_properties['deployment']['id'] = test_name
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            mock_client.return_value = MockCloudifyRestClient()
            _ctx.instance.runtime_properties['deployment'] = {}
            _ctx.instance.runtime_properties['deployment']['id'] = test_name
            output = delete_deployment(
                operation='delete_deployment',
                deployment_id='test_deployments_delete',
                timeout=.01)
            self.assertTrue(output)

    def test_create_deployment_rest_client_error(self):
        """create_deployment raises NonRecoverableError on rest-client error."""
        # Tests that deployments create fails on rest client error
        test_name = 'test_create_deployment_rest_client_error'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        _ctx.instance.runtime_properties['deployment'] = {}
        _ctx.instance.runtime_properties['deployment']['id'] = test_name
        _ctx.instance.runtime_properties['deployment']['outputs'] = {}
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            cfy_mock_client.deployments.create = REST_CLIENT_EXCEPTION
            mock_client.return_value = cfy_mock_client
            error = self.assertRaises(NonRecoverableError,
                                      create_deployment,
                                      deployment_id='test_deployments_create',
                                      blueprint_id='test_deployments_create',
                                      timeout=.01)
            self.assertIn('action create failed',
                          error.message)

    def test_create_deployment_timeout(self):
        """create_deployment raises when polling for the workflow times out."""
        # Tests that deployments create fails on timeout
        test_name = 'test_create_deployment_timeout'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        _ctx.instance.runtime_properties['deployment'] = {}
        _ctx.instance.runtime_properties['deployment']['id'] = test_name
        _ctx.instance.runtime_properties['deployment']['outputs'] = {}
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            list_response = cfy_mock_client.executions.list()
            list_response[0]['id'] = 'exec_id'
            list_response[0]['workflow_id'] = 'create_deployment_environment'
            list_response[0]['deployment_id'] =\
                'test_create_deployment_timeout'

            def mock_return(*args, **kwargs):
                # Ignore whatever arguments executions.list is called with.
                del args, kwargs
                return list_response

            poll_with_timeout_test = \
                'cloudify_deployment_proxy.polling.poll_with_timeout'
            cfy_mock_client.executions.list = mock_return
            mock_client.return_value = cfy_mock_client
            with mock.patch(poll_with_timeout_test) as poll:
                poll.return_value = False
                error = self.assertRaises(
                    NonRecoverableError, create_deployment,
                    deployment_id='test_create_deployment_timeout',
                    blueprint_id='test', timeout=.01)
                self.assertIn('Execution timeout', error.message)

    def test_create_deployment_success(self):
        """Successful create also creates the node's declared secrets."""
        # Tests that create deployment succeeds
        test_name = 'test_create_deployment_success'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        _ctx.node.properties['secrets'] = {'a': 'b'}
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            list_response = cfy_mock_client.executions.list()
            list_response[0]['id'] = 'exec_id'
            list_response[0]['workflow_id'] = 'create_deployment_environment'
            list_response[0]['deployment_id'] =\
                'test_create_deployment_success'

            def mock_return(*args, **kwargs):
                # Ignore whatever arguments executions.list is called with.
                del args, kwargs
                return list_response

            poll_with_timeout_test = \
                'cloudify_deployment_proxy.polling.poll_with_timeout'
            cfy_mock_client.executions.list = mock_return
            cfy_mock_client.secrets.create = mock.Mock()
            mock_client.return_value = cfy_mock_client
            with mock.patch(poll_with_timeout_test) as poll:
                poll.return_value = True
                output = create_deployment(operation='create_deployment',
                                           timeout=.01)
                self.assertTrue(output)
                cfy_mock_client.secrets.create.assert_called_with(key='a',
                                                                  value='b')

    def test_create_deployment_exists(self):
        """create_deployment returns False if the deployment already exists."""
        # Tests that create deployment exists
        test_name = 'test_create_deployment_exists'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            list_response = cfy_mock_client.deployments.list()
            list_response[0]['id'] = test_name

            def mock_return(*args, **kwargs):
                # Ignore whatever arguments deployments.list is called with.
                del args, kwargs
                return list_response

            cfy_mock_client.deployments.list = mock_return
            mock_client.return_value = cfy_mock_client
            output = create_deployment(operation='create_deployment',
                                       timeout=.01)
            self.assertFalse(output)
| |
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
import unittest
import os
import boto
# For an issue with venv and distutils, disable pylint message here
# pylint: disable-msg=E0611,F0401
from distutils.version import StrictVersion
import six
from six.moves import zip, zip_longest
import test.functional as tf
from swift.common.middleware.s3api.etree import fromstring, tostring, \
Element, SubElement
from swift.common.middleware.s3api.utils import mktime
from swift.common.utils import md5
from test.functional.s3api import S3ApiBase
from test.functional.s3api.s3_test_client import Connection
from test.functional.s3api.utils import get_error_code, get_error_msg, \
calculate_md5
def setUpModule():
    """Initialize the functional-test package before this module's tests run."""
    tf.setup_package()
def tearDownModule():
    """Tear down the functional-test package after this module's tests run."""
    tf.teardown_package()
class TestS3ApiMultiUpload(S3ApiBase):
def setUp(self):
super(TestS3ApiMultiUpload, self).setUp()
if not tf.cluster_info['s3api'].get('allow_multipart_uploads', False):
raise tf.SkipTest('multipart upload is not enebled')
self.min_segment_size = int(tf.cluster_info['s3api'].get(
'min_segment_size', 5242880))
def _gen_comp_xml(self, etags, step=1):
elem = Element('CompleteMultipartUpload')
for i, etag in enumerate(etags):
elem_part = SubElement(elem, 'Part')
SubElement(elem_part, 'PartNumber').text = str(i * step + 1)
SubElement(elem_part, 'ETag').text = etag
return tostring(elem)
def _initiate_multi_uploads_result_generator(self, bucket, keys,
headers=None, trials=1):
if headers is None:
headers = [None] * len(keys)
self.conn.make_request('PUT', bucket)
query = 'uploads'
for key, key_headers in zip_longest(keys, headers):
for i in range(trials):
status, resp_headers, body = \
self.conn.make_request('POST', bucket, key,
headers=key_headers, query=query)
yield status, resp_headers, body
def _upload_part(self, bucket, key, upload_id, content=None, part_num=1):
query = 'partNumber=%s&uploadId=%s' % (part_num, upload_id)
content = content if content else b'a' * self.min_segment_size
with self.quiet_boto_logging():
status, headers, body = self.conn.make_request(
'PUT', bucket, key, body=content, query=query)
return status, headers, body
def _upload_part_copy(self, src_bucket, src_obj, dst_bucket, dst_key,
upload_id, part_num=1, src_range=None,
src_version_id=None):
src_path = '%s/%s' % (src_bucket, src_obj)
if src_version_id:
src_path += '?versionId=%s' % src_version_id
query = 'partNumber=%s&uploadId=%s' % (part_num, upload_id)
req_headers = {'X-Amz-Copy-Source': src_path}
if src_range:
req_headers['X-Amz-Copy-Source-Range'] = src_range
status, headers, body = \
self.conn.make_request('PUT', dst_bucket, dst_key,
headers=req_headers,
query=query)
elem = fromstring(body, 'CopyPartResult')
etag = elem.find('ETag').text.strip('"')
return status, headers, body, etag
def _complete_multi_upload(self, bucket, key, upload_id, xml):
query = 'uploadId=%s' % upload_id
status, headers, body = \
self.conn.make_request('POST', bucket, key, body=xml,
query=query)
return status, headers, body
    def test_object_multi_upload(self):
        """End-to-end multipart-upload happy path.

        Walks the full MPU lifecycle against three keys: initiate, list
        uploads, upload part, upload-part-copy, list parts, complete
        (including an idempotent retry), copy with an MPU as source,
        abort, conditional HEADs, and the final bucket listing.
        """
        bucket = 'bucket'
        keys = [u'obj1\N{SNOWMAN}', 'obj2', 'obj3']
        bad_content_md5 = base64.b64encode(b'a' * 16).strip().decode('ascii')
        headers = [{'Content-Type': 'foo/bar', 'x-amz-meta-baz': 'quux'},
                   {'Content-MD5': bad_content_md5},
                   {'Etag': 'nonsense'}]
        uploads = []

        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys, headers=headers)

        # Initiate Multipart Upload
        for expected_key, (status, headers, body) in \
                zip(keys, results_generator):
            self.assertEqual(status, 200, body)
            self.assertCommonResponseHeaders(headers)
            self.assertIn('content-type', headers)
            self.assertEqual(headers['content-type'], 'application/xml')
            self.assertIn('content-length', headers)
            self.assertEqual(headers['content-length'], str(len(body)))
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            self.assertEqual(elem.find('Bucket').text, bucket)
            key = elem.find('Key').text
            if six.PY2:
                expected_key = expected_key.encode('utf-8')
            self.assertEqual(expected_key, key)
            upload_id = elem.find('UploadId').text
            self.assertIsNotNone(upload_id)
            self.assertNotIn((key, upload_id), uploads)
            uploads.append((key, upload_id))

        self.assertEqual(len(uploads), len(keys))  # sanity

        # List Multipart Uploads
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'ListMultipartUploadsResult')
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertIsNone(elem.find('KeyMarker').text)
        self.assertEqual(elem.find('NextKeyMarker').text, uploads[-1][0])
        self.assertIsNone(elem.find('UploadIdMarker').text)
        self.assertEqual(elem.find('NextUploadIdMarker').text, uploads[-1][1])
        self.assertEqual(elem.find('MaxUploads').text, '1000')
        self.assertTrue(elem.find('EncodingType') is None)
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        self.assertEqual(len(elem.findall('Upload')), 3)
        for (expected_key, expected_upload_id), u in \
                zip(uploads, elem.findall('Upload')):
            key = u.find('Key').text
            upload_id = u.find('UploadId').text
            self.assertEqual(expected_key, key)
            self.assertEqual(expected_upload_id, upload_id)
            self.assertEqual(u.find('Initiator/ID').text,
                             self.conn.user_id)
            self.assertEqual(u.find('Initiator/DisplayName').text,
                             self.conn.user_id)
            self.assertEqual(u.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(u.find('Owner/DisplayName').text,
                             self.conn.user_id)
            self.assertEqual(u.find('StorageClass').text, 'STANDARD')
            self.assertTrue(u.find('Initiated').text is not None)

        # Upload Part
        key, upload_id = uploads[0]
        content = b'a' * self.min_segment_size
        etag = md5(content, usedforsecurity=False).hexdigest()
        status, headers, body = \
            self._upload_part(bucket, key, upload_id, content)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers, etag)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], '0')
        expected_parts_list = [(headers['etag'], mktime(headers['date']))]

        # Upload Part Copy
        key, upload_id = uploads[1]
        src_bucket = 'bucket2'
        src_obj = 'obj3'
        src_content = b'b' * self.min_segment_size
        etag = md5(src_content, usedforsecurity=False).hexdigest()

        # prepare src obj
        self.conn.make_request('PUT', src_bucket)
        with self.quiet_boto_logging():
            self.conn.make_request('PUT', src_bucket, src_obj,
                                   body=src_content)
        _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj)
        self.assertCommonResponseHeaders(headers)

        status, headers, body, resp_etag = \
            self._upload_part_copy(src_bucket, src_obj, bucket,
                                   key, upload_id)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertTrue('etag' not in headers)
        elem = fromstring(body, 'CopyPartResult')

        last_modified = elem.find('LastModified').text
        self.assertTrue(last_modified is not None)

        self.assertEqual(resp_etag, etag)

        # Check last-modified timestamp
        key, upload_id = uploads[1]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)

        self.assertEqual(200, status)
        elem = fromstring(body, 'ListPartsResult')

        # FIXME: COPY result drops milli/microseconds but GET doesn't
        last_modified_gets = [p.find('LastModified').text
                              for p in elem.iterfind('Part')]
        self.assertEqual(
            last_modified_gets[0].rsplit('.', 1)[0],
            last_modified.rsplit('.', 1)[0],
            '%r != %r' % (last_modified_gets[0], last_modified))
        # There should be exactly one part in the result so far
        self.assertEqual(1, len(last_modified_gets))

        # List Parts
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'ListPartsResult')
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertEqual(elem.find('Key').text, key)
        self.assertEqual(elem.find('UploadId').text, upload_id)
        self.assertEqual(elem.find('Initiator/ID').text, self.conn.user_id)
        self.assertEqual(elem.find('Initiator/DisplayName').text,
                         self.conn.user_id)
        self.assertEqual(elem.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(elem.find('Owner/DisplayName').text,
                         self.conn.user_id)
        self.assertEqual(elem.find('StorageClass').text, 'STANDARD')
        self.assertEqual(elem.find('PartNumberMarker').text, '0')
        self.assertEqual(elem.find('NextPartNumberMarker').text, '1')
        self.assertEqual(elem.find('MaxParts').text, '1000')
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        self.assertEqual(len(elem.findall('Part')), 1)

        # etags will be used to generate xml for Complete Multipart Upload
        etags = []
        for (expected_etag, expected_date), p in \
                zip(expected_parts_list, elem.findall('Part')):
            last_modified = p.find('LastModified').text
            self.assertTrue(last_modified is not None)
            # TODO: sanity check
            # (kota_) How do we check the sanity?
            # the last-modified header drops milli-seconds info
            # by the constraint of the format.
            # For now, we can do either the format check or round check
            # last_modified_from_xml = mktime(last_modified)
            # self.assertEqual(expected_date,
            #                  last_modified_from_xml)
            self.assertEqual(expected_etag, p.find('ETag').text)
            self.assertEqual(self.min_segment_size, int(p.find('Size').text))
            etags.append(p.find('ETag').text)

        # Complete Multipart Upload
        key, upload_id = uploads[0]
        xml = self._gen_comp_xml(etags)
        status, headers, body = \
            self._complete_multi_upload(bucket, key, upload_id, xml)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertIn('content-type', headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        if 'content-length' in headers:
            self.assertEqual(headers['content-length'], str(len(body)))
        else:
            self.assertIn('transfer-encoding', headers)
            self.assertEqual(headers['transfer-encoding'], 'chunked')
        lines = body.split(b'\n')
        self.assertTrue(lines[0].startswith(b'<?xml'), body)
        self.assertTrue(lines[0].endswith(b'?>'), body)
        elem = fromstring(body, 'CompleteMultipartUploadResult')
        self.assertEqual(
            '%s/bucket/obj1%%E2%%98%%83' %
            tf.config['s3_storage_url'].rstrip('/'),
            elem.find('Location').text)
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertEqual(elem.find('Key').text, key)
        # The MPU ETag is md5-of-concatenated-binary-part-md5s, dash, count.
        concatted_etags = b''.join(
            etag.strip('"').encode('ascii') for etag in etags)
        exp_etag = '"%s-%s"' % (
            md5(binascii.unhexlify(concatted_etags),
                usedforsecurity=False).hexdigest(), len(etags))
        etag = elem.find('ETag').text
        self.assertEqual(etag, exp_etag)

        exp_size = self.min_segment_size * len(etags)
        status, headers, body = \
            self.conn.make_request('HEAD', bucket, key)
        self.assertEqual(status, 200)
        self.assertEqual(headers['content-length'], str(exp_size))
        self.assertEqual(headers['content-type'], 'foo/bar')
        self.assertEqual(headers['x-amz-meta-baz'], 'quux')
        swift_etag = '"%s"' % md5(
            concatted_etags, usedforsecurity=False).hexdigest()
        # TODO: GET via swift api, check against swift_etag

        # Should be safe to retry
        status, headers, body = \
            self._complete_multi_upload(bucket, key, upload_id, xml)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertIn('content-type', headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        if 'content-length' in headers:
            self.assertEqual(headers['content-length'], str(len(body)))
        else:
            self.assertIn('transfer-encoding', headers)
            self.assertEqual(headers['transfer-encoding'], 'chunked')
        lines = body.split(b'\n')
        self.assertTrue(lines[0].startswith(b'<?xml'), body)
        self.assertTrue(lines[0].endswith(b'?>'), body)
        elem = fromstring(body, 'CompleteMultipartUploadResult')
        self.assertEqual(
            '%s/bucket/obj1%%E2%%98%%83' %
            tf.config['s3_storage_url'].rstrip('/'),
            elem.find('Location').text)
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertEqual(elem.find('Key').text, key)
        self.assertEqual(elem.find('ETag').text, exp_etag)
        status, headers, body = \
            self.conn.make_request('HEAD', bucket, key)
        self.assertEqual(status, 200)
        self.assertEqual(headers['content-length'], str(exp_size))
        self.assertEqual(headers['content-type'], 'foo/bar')
        self.assertEqual(headers['x-amz-meta-baz'], 'quux')

        # Upload Part Copy -- MU as source
        key, upload_id = uploads[1]
        status, headers, body, resp_etag = \
            self._upload_part_copy(bucket, keys[0], bucket,
                                   key, upload_id, part_num=2)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertIn('content-type', headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertIn('content-length', headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertNotIn('etag', headers)
        elem = fromstring(body, 'CopyPartResult')

        last_modified = elem.find('LastModified').text
        self.assertIsNotNone(last_modified)

        exp_content = b'a' * self.min_segment_size
        etag = md5(exp_content, usedforsecurity=False).hexdigest()
        self.assertEqual(resp_etag, etag)

        # Also check that the etag is correct in part listings
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'ListPartsResult')
        self.assertEqual(len(elem.findall('Part')), 2)
        self.assertEqual(elem.findall('Part')[1].find('PartNumber').text, '2')
        self.assertEqual(elem.findall('Part')[1].find('ETag').text,
                         '"%s"' % etag)

        # Abort Multipart Uploads
        # note that uploads[1] has part data while uploads[2] does not
        for key, upload_id in uploads[1:]:
            query = 'uploadId=%s' % upload_id
            status, headers, body = \
                self.conn.make_request('DELETE', bucket, key, query=query)
            self.assertEqual(status, 204)
            self.assertCommonResponseHeaders(headers)
            self.assertTrue('content-type' in headers)
            self.assertEqual(headers['content-type'],
                             'text/html; charset=UTF-8')
            self.assertTrue('content-length' in headers)
            self.assertEqual(headers['content-length'], '0')

        # Check object
        def check_obj(req_headers, exp_status):
            # HEAD the completed object with conditional headers and
            # verify etag/content-length presence by status class.
            status, headers, body = \
                self.conn.make_request('HEAD', bucket, keys[0], req_headers)
            self.assertEqual(status, exp_status)
            self.assertCommonResponseHeaders(headers)
            self.assertIn('content-length', headers)
            if exp_status == 412:
                self.assertNotIn('etag', headers)
                self.assertEqual(headers['content-length'], '0')
            else:
                self.assertIn('etag', headers)
                self.assertEqual(headers['etag'], exp_etag)
                if exp_status == 304:
                    self.assertEqual(headers['content-length'], '0')
                else:
                    self.assertEqual(headers['content-length'], str(exp_size))

        check_obj({}, 200)

        # Sanity check conditionals
        check_obj({'If-Match': 'some other thing'}, 412)
        check_obj({'If-None-Match': 'some other thing'}, 200)

        # More interesting conditional cases
        check_obj({'If-Match': exp_etag}, 200)
        check_obj({'If-Match': swift_etag}, 412)
        check_obj({'If-None-Match': swift_etag}, 200)
        check_obj({'If-None-Match': exp_etag}, 304)

        # Check listings
        status, headers, body = self.conn.make_request('GET', bucket)
        self.assertEqual(status, 200)

        elem = fromstring(body, 'ListBucketResult')
        resp_objects = list(elem.findall('./Contents'))
        self.assertEqual(len(resp_objects), 1)
        o = resp_objects[0]
        if six.PY2:
            expected_key = keys[0].encode('utf-8')
        else:
            expected_key = keys[0]
        self.assertEqual(o.find('Key').text, expected_key)
        self.assertIsNotNone(o.find('LastModified').text)
        self.assertRegexpMatches(
            o.find('LastModified').text,
            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
        self.assertEqual(o.find('ETag').text, exp_etag)
        self.assertEqual(o.find('Size').text, str(exp_size))
        self.assertIsNotNone(o.find('StorageClass').text)
        self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(o.find('Owner/DisplayName').text,
                         self.conn.user_id)
def test_initiate_multi_upload_error(self):
bucket = 'bucket'
key = 'obj'
self.conn.make_request('PUT', bucket)
query = 'uploads'
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('POST', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
status, resp_headers, body = \
self.conn.make_request('POST', 'nothing', key, query=query)
self.assertEqual(get_error_code(body), 'NoSuchBucket')
status, resp_headers, body = self.conn.make_request(
'POST', bucket,
'x' * (tf.cluster_info['swift']['max_object_name_length'] + 1),
query=query)
self.assertEqual(get_error_code(body), 'KeyTooLongError')
def test_list_multi_uploads_error(self):
bucket = 'bucket'
self.conn.make_request('PUT', bucket)
query = 'uploads'
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('GET', bucket, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
status, headers, body = \
self.conn.make_request('GET', 'nothing', query=query)
self.assertEqual(get_error_code(body), 'NoSuchBucket')
def test_upload_part_error(self):
bucket = 'bucket'
self.conn.make_request('PUT', bucket)
query = 'uploads'
key = 'obj'
status, headers, body = \
self.conn.make_request('POST', bucket, key, query=query)
elem = fromstring(body, 'InitiateMultipartUploadResult')
upload_id = elem.find('UploadId').text
query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
status, headers, body = \
self.conn.make_request('PUT', 'nothing', key, query=query)
self.assertEqual(get_error_code(body), 'NoSuchBucket')
query = 'partNumber=%s&uploadId=%s' % (1, 'nothing')
status, headers, body = \
self.conn.make_request('PUT', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'NoSuchUpload')
query = 'partNumber=%s&uploadId=%s' % (0, upload_id)
status, headers, body = \
self.conn.make_request('PUT', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'InvalidArgument')
err_msg = 'Part number must be an integer between 1 and'
self.assertTrue(err_msg in get_error_msg(body))
def test_upload_part_copy_error(self):
src_bucket = 'src'
src_obj = 'src'
self.conn.make_request('PUT', src_bucket)
self.conn.make_request('PUT', src_bucket, src_obj)
src_path = '%s/%s' % (src_bucket, src_obj)
bucket = 'bucket'
self.conn.make_request('PUT', bucket)
key = 'obj'
query = 'uploads'
status, headers, body = \
self.conn.make_request('POST', bucket, key, query=query)
elem = fromstring(body, 'InitiateMultipartUploadResult')
upload_id = elem.find('UploadId').text
query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', bucket, key,
headers={
'X-Amz-Copy-Source': src_path
},
query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
status, headers, body = \
self.conn.make_request('PUT', 'nothing', key,
headers={'X-Amz-Copy-Source': src_path},
query=query)
self.assertEqual(get_error_code(body), 'NoSuchBucket')
query = 'partNumber=%s&uploadId=%s' % (1, 'nothing')
status, headers, body = \
self.conn.make_request('PUT', bucket, key,
headers={'X-Amz-Copy-Source': src_path},
query=query)
self.assertEqual(get_error_code(body), 'NoSuchUpload')
src_path = '%s/%s' % (src_bucket, 'nothing')
query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
status, headers, body = \
self.conn.make_request('PUT', bucket, key,
headers={'X-Amz-Copy-Source': src_path},
query=query)
self.assertEqual(get_error_code(body), 'NoSuchKey')
def test_list_parts_error(self):
bucket = 'bucket'
self.conn.make_request('PUT', bucket)
key = 'obj'
query = 'uploads'
status, headers, body = \
self.conn.make_request('POST', bucket, key, query=query)
elem = fromstring(body, 'InitiateMultipartUploadResult')
upload_id = elem.find('UploadId').text
query = 'uploadId=%s' % upload_id
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('GET', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
status, headers, body = \
self.conn.make_request('GET', 'nothing', key, query=query)
self.assertEqual(get_error_code(body), 'NoSuchBucket')
query = 'uploadId=%s' % 'nothing'
status, headers, body = \
self.conn.make_request('GET', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'NoSuchUpload')
def test_abort_multi_upload_error(self):
bucket = 'bucket'
self.conn.make_request('PUT', bucket)
key = 'obj'
query = 'uploads'
status, headers, body = \
self.conn.make_request('POST', bucket, key, query=query)
elem = fromstring(body, 'InitiateMultipartUploadResult')
upload_id = elem.find('UploadId').text
self._upload_part(bucket, key, upload_id)
query = 'uploadId=%s' % upload_id
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('DELETE', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
status, headers, body = \
self.conn.make_request('DELETE', 'nothing', key, query=query)
self.assertEqual(get_error_code(body), 'NoSuchBucket')
status, headers, body = \
self.conn.make_request('DELETE', bucket, 'nothing', query=query)
self.assertEqual(get_error_code(body), 'NoSuchUpload')
query = 'uploadId=%s' % 'nothing'
status, headers, body = \
self.conn.make_request('DELETE', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'NoSuchUpload')
    def test_complete_multi_upload_error(self):
        """Exercise Complete Multipart Upload failure modes: undersized
        parts, bad credentials, missing bucket, unknown upload id,
        malformed XML, invalid etags, and etags for parts that were
        never uploaded."""
        bucket = 'bucket'
        keys = ['obj', 'obj2']
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        # Upload two empty parts; both are below the minimum segment
        # size, which sets up the EntityTooSmall case below.
        for i in range(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, keys[0], query=query)
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        # part 1 too small
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # invalid credentials
        auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
        status, headers, body = \
            auth_error_conn.make_request('POST', bucket, keys[0], body=xml,
                                         query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        # wrong/missing bucket
        status, headers, body = \
            self.conn.make_request('POST', 'nothing', keys[0], query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        # wrong upload ID
        query = 'uploadId=%s' % 'nothing'
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
        # without Part tag in xml
        query = 'uploadId=%s' % upload_id
        xml = self._gen_comp_xml([])
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'MalformedXML')
        # with invalid etag in xml
        invalid_etag = 'invalid'
        xml = self._gen_comp_xml([invalid_etag])
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'InvalidPart')
        # without part in Swift (a fresh upload on keys[1] that never had
        # a part uploaded, completed with an etag from the first upload)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[1], query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        query = 'uploadId=%s' % upload_id
        xml = self._gen_comp_xml([etags[0]])
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[1], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'InvalidPart')
    def test_complete_upload_min_segment_size(self):
        """Verify the minimum-part-size rule on complete: parts other
        than the last must be at least ``self.min_segment_size`` bytes,
        otherwise completing fails with EntityTooSmall."""
        bucket = 'bucket'
        key = 'obj'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        # multi parts with no body
        etags = []
        for i in range(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query)
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # multi parts with all parts less than min segment size
        etags = []
        for i in range(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query,
                                       body='AA')
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # one part and less than min segment size
        # (a single part is also the last part, so it may be small)
        etags = []
        query = 'partNumber=1&uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query,
                                   body='AA')
        etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(status, 200)
        # multi parts with all parts except the first part less than min
        # segment size
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        # NOTE(review): the loop below indexes body_size[1] and
        # body_size[2], so body_size[0] is never used and part 1 is
        # uploaded with min_segment_size - 1 bytes, which does not quite
        # match the comment above. The asserted outcome (EntityTooSmall)
        # holds either way -- TODO confirm the intended sizes.
        body_size = [self.min_segment_size, self.min_segment_size - 1, 2]
        for i in range(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query,
                                       body=b'A' * body_size[i])
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # multi parts with all parts except last part more than min
        # segment size
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        body_size = [self.min_segment_size, self.min_segment_size, 2]
        for i in range(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query,
                                       body=b'A' * body_size[i])
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(status, 200)
def test_complete_upload_with_fewer_etags(self):
bucket = 'bucket'
key = 'obj'
self.conn.make_request('PUT', bucket)
query = 'uploads'
status, headers, body = \
self.conn.make_request('POST', bucket, key, query=query)
elem = fromstring(body, 'InitiateMultipartUploadResult')
upload_id = elem.find('UploadId').text
etags = []
for i in range(1, 4):
query = 'partNumber=%s&uploadId=%s' % (2 * i - 1, upload_id)
status, headers, body = self.conn.make_request(
'PUT', bucket, key, body=b'A' * 1024 * 1024 * 5,
query=query)
etags.append(headers['etag'])
query = 'uploadId=%s' % upload_id
xml = self._gen_comp_xml(etags[:-1], step=2)
status, headers, body = \
self.conn.make_request('POST', bucket, key, body=xml,
query=query)
self.assertEqual(status, 200)
    def test_object_multi_upload_part_copy_range(self):
        """Upload a part with Upload Part Copy plus a Range header and
        verify the copied byte range's etag, the CopyPartResult element,
        the part listing, and aborting the upload."""
        bucket = 'bucket'
        keys = ['obj1']
        uploads = []
        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys)
        # Initiate Multipart Upload
        for expected_key, (status, headers, body) in \
                zip(keys, results_generator):
            self.assertEqual(status, 200)
            self.assertCommonResponseHeaders(headers)
            self.assertTrue('content-type' in headers)
            self.assertEqual(headers['content-type'], 'application/xml')
            self.assertTrue('content-length' in headers)
            self.assertEqual(headers['content-length'], str(len(body)))
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            self.assertEqual(elem.find('Bucket').text, bucket)
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))
        self.assertEqual(len(uploads), len(keys))  # sanity
        # Upload Part Copy Range
        key, upload_id = uploads[0]
        src_bucket = 'bucket2'
        src_obj = 'obj4'
        src_content = b'y' * (self.min_segment_size // 2) + b'z' * \
            self.min_segment_size
        src_range = 'bytes=0-%d' % (self.min_segment_size - 1)
        # Expected etag covers only the requested byte range.
        etag = md5(
            src_content[:self.min_segment_size],
            usedforsecurity=False).hexdigest()
        # prepare src obj
        self.conn.make_request('PUT', src_bucket)
        self.conn.make_request('PUT', src_bucket, src_obj, body=src_content)
        _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj)
        self.assertCommonResponseHeaders(headers)
        status, headers, body, resp_etag = \
            self._upload_part_copy(src_bucket, src_obj, bucket,
                                   key, upload_id, 1, src_range)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        # The copied part's etag is reported in the body, not the headers.
        self.assertTrue('etag' not in headers)
        elem = fromstring(body, 'CopyPartResult')
        last_modified = elem.find('LastModified').text
        self.assertTrue(last_modified is not None)
        self.assertEqual(resp_etag, etag)
        # Check last-modified timestamp
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        elem = fromstring(body, 'ListPartsResult')
        # FIXME: COPY result drops milli/microseconds but GET doesn't
        last_modified_gets = [p.find('LastModified').text
                              for p in elem.iterfind('Part')]
        self.assertEqual(
            last_modified_gets[0].rsplit('.', 1)[0],
            last_modified.rsplit('.', 1)[0],
            '%r != %r' % (last_modified_gets[0], last_modified))
        # There should be *exactly* one parts in the result
        self.assertEqual(1, len(last_modified_gets))
        # Abort Multipart Upload
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('DELETE', bucket, key, query=query)
        # sanity checks
        self.assertEqual(status, 204)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], '0')
    def test_object_multi_upload_part_copy_version(self):
        """Upload Part Copy from specific source object versions (the
        'null' version and a real version id) and verify the etags, the
        part-listing timestamps, and aborting the upload."""
        if 'object_versioning' not in tf.cluster_info:
            self.skipTest('Object Versioning not enabled')
        bucket = 'bucket'
        keys = ['obj1']
        uploads = []
        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys)
        # Initiate Multipart Upload
        for expected_key, (status, headers, body) in \
                zip(keys, results_generator):
            self.assertEqual(status, 200)
            self.assertCommonResponseHeaders(headers)
            self.assertTrue('content-type' in headers)
            self.assertEqual(headers['content-type'], 'application/xml')
            self.assertTrue('content-length' in headers)
            self.assertEqual(headers['content-length'], str(len(body)))
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            self.assertEqual(elem.find('Bucket').text, bucket)
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))
        self.assertEqual(len(uploads), len(keys))  # sanity
        key, upload_id = uploads[0]
        src_bucket = 'bucket2'
        src_obj = 'obj4'
        src_content = b'y' * (self.min_segment_size // 2) + b'z' * \
            self.min_segment_size
        etags = [md5(src_content, usedforsecurity=False).hexdigest()]
        # prepare null-version src obj
        self.conn.make_request('PUT', src_bucket)
        self.conn.make_request('PUT', src_bucket, src_obj, body=src_content)
        _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj)
        self.assertCommonResponseHeaders(headers)
        # Turn on versioning
        elem = Element('VersioningConfiguration')
        SubElement(elem, 'Status').text = 'Enabled'
        xml = tostring(elem)
        status, headers, body = self.conn.make_request(
            'PUT', src_bucket, body=xml, query='versioning')
        self.assertEqual(status, 200)
        src_obj2 = 'obj5'
        src_content2 = b'stub'
        etags.append(md5(src_content2, usedforsecurity=False).hexdigest())
        # prepare src obj w/ real version
        self.conn.make_request('PUT', src_bucket, src_obj2, body=src_content2)
        _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj2)
        self.assertCommonResponseHeaders(headers)
        version_id2 = headers['x-amz-version-id']
        # Copy part 1 from the pre-versioning ('null') version.
        status, headers, body, resp_etag = \
            self._upload_part_copy(src_bucket, src_obj, bucket,
                                   key, upload_id, 1,
                                   src_version_id='null')
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertTrue('etag' not in headers)
        elem = fromstring(body, 'CopyPartResult')
        last_modifieds = [elem.find('LastModified').text]
        self.assertTrue(last_modifieds[0] is not None)
        self.assertEqual(resp_etag, etags[0])
        # Copy part 2 from an explicit, real version id.
        status, headers, body, resp_etag = \
            self._upload_part_copy(src_bucket, src_obj2, bucket,
                                   key, upload_id, 2,
                                   src_version_id=version_id2)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertTrue('etag' not in headers)
        elem = fromstring(body, 'CopyPartResult')
        last_modifieds.append(elem.find('LastModified').text)
        self.assertTrue(last_modifieds[1] is not None)
        self.assertEqual(resp_etag, etags[1])
        # Check last-modified timestamp
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        elem = fromstring(body, 'ListPartsResult')
        # FIXME: COPY result drops milli/microseconds but GET doesn't
        last_modified_gets = [p.find('LastModified').text
                              for p in elem.iterfind('Part')]
        self.assertEqual(
            [lm.rsplit('.', 1)[0] for lm in last_modified_gets],
            [lm.rsplit('.', 1)[0] for lm in last_modifieds])
        # There should be *exactly* two parts in the result
        self.assertEqual(2, len(last_modified_gets))
        # Abort Multipart Upload
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('DELETE', bucket, key, query=query)
        # sanity checks
        self.assertEqual(status, 204)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], '0')
class TestS3ApiMultiUploadSigV4(TestS3ApiMultiUpload):
    """Re-run the multipart upload tests using Signature Version 4.

    The ``S3_USE_SIGV4`` environment variable is set for the duration of
    the class so every Connection created by the inherited tests signs
    requests with SigV4.
    """

    @classmethod
    def setUpClass(cls):
        os.environ['S3_USE_SIGV4'] = "True"

    @classmethod
    def tearDownClass(cls):
        del os.environ['S3_USE_SIGV4']

    def setUp(self):
        super(TestS3ApiMultiUploadSigV4, self).setUp()

    def test_object_multi_upload_part_copy_range(self):
        if StrictVersion(boto.__version__) < StrictVersion('3.0'):
            # boto 2 doesn't sort headers properly; see
            # https://github.com/boto/boto/pull/3032
            # or https://github.com/boto/boto/pull/3176
            # or https://github.com/boto/boto/pull/3751
            # or https://github.com/boto/boto/pull/3824
            self.skipTest('This stuff got the issue of boto<=2.x')
        # Bug fix: this override previously fell through and passed
        # silently with boto >= 3.0, so the inherited test was never
        # exercised under SigV4. Delegate to the parent implementation.
        super(TestS3ApiMultiUploadSigV4,
              self).test_object_multi_upload_part_copy_range()

    def test_delete_bucket_multi_upload_object_exisiting(self):
        """DELETE on a bucket holding a completed multipart object must
        fail with 409 until the object is removed.

        NOTE: the typo in the method name ('exisiting') is preserved so
        existing test selectors keep working.
        """
        bucket = 'bucket'
        keys = ['obj1']
        uploads = []
        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys)
        # Initiate Multipart Upload
        for expected_key, (status, _, body) in \
                zip(keys, results_generator):
            self.assertEqual(status, 200)  # sanity
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)  # sanity
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)  # sanity
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))
        self.assertEqual(len(uploads), len(keys))  # sanity
        # Upload Part
        key, upload_id = uploads[0]
        content = b'a' * self.min_segment_size
        status, headers, body = \
            self._upload_part(bucket, key, upload_id, content)
        self.assertEqual(status, 200)
        # Complete Multipart Upload
        key, upload_id = uploads[0]
        etags = [md5(content, usedforsecurity=False).hexdigest()]
        xml = self._gen_comp_xml(etags)
        status, headers, body = \
            self._complete_multi_upload(bucket, key, upload_id, xml)
        self.assertEqual(status, 200)  # sanity
        # GET multipart object
        status, headers, body = \
            self.conn.make_request('GET', bucket, key)
        self.assertEqual(status, 200)  # sanity
        self.assertEqual(content, body)  # sanity
        # DELETE bucket while the object existing
        status, headers, body = \
            self.conn.make_request('DELETE', bucket)
        self.assertEqual(status, 409)  # sanity
        # The object must still be there.
        status, headers, body = \
            self.conn.make_request('GET', bucket, key)
        self.assertEqual(status, 200)  # sanity
        self.assertEqual(content, body)  # sanity
        # Can delete it with DeleteMultipleObjects request
        elem = Element('Delete')
        SubElement(elem, 'Quiet').text = 'true'
        obj_elem = SubElement(elem, 'Object')
        SubElement(obj_elem, 'Key').text = key
        body = tostring(elem, use_s3ns=False)
        status, headers, body = self.conn.make_request(
            'POST', bucket, body=body, query='delete',
            headers={'Content-MD5': calculate_md5(body)})
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        status, headers, body = \
            self.conn.make_request('GET', bucket, key)
        self.assertEqual(status, 404)  # sanity
        # Now we can delete
        status, headers, body = \
            self.conn.make_request('DELETE', bucket)
        self.assertEqual(status, 204)  # sanity
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
from django.template.loader import render_to_string
from debug_toolbar import settings as dt_settings
from debug_toolbar.utils import get_name_from_obj
class Panel:
    """
    Base class for panels.

    Subclasses provide the titles, template/content and the
    middleware-style hooks below; the toolbar drives the lifecycle.
    """

    def __init__(self, toolbar, get_response):
        # toolbar: the DebugToolbar instance that owns this panel.
        # get_response: the next callable in the middleware chain.
        self.toolbar = toolbar
        self.get_response = get_response

    # Private panel properties
    @property
    def panel_id(self):
        # The class name doubles as the panel identifier used for stats
        # storage and the per-panel enable/disable cookie.
        return self.__class__.__name__

    @property
    def enabled(self):
        """
        Whether the panel is enabled: the user's ``djdt<panel_id>``
        cookie wins, falling back to the ``DISABLE_PANELS`` setting.
        """
        # Check to see if settings has a default value for it
        disabled_panels = dt_settings.get_config()["DISABLE_PANELS"]
        panel_path = get_name_from_obj(self)
        # Some panels such as the SQLPanel and TemplatesPanel exist in a
        # panel module, but can be disabled without panel in the path.
        # For that reason, replace .panel. in the path and check for that
        # value in the disabled panels as well.
        disable_panel = (
            panel_path in disabled_panels
            or panel_path.replace(".panel.", ".") in disabled_panels
        )
        if disable_panel:
            default = "off"
        else:
            default = "on"
        # The user's cookies should override the default value
        return self.toolbar.request.COOKIES.get("djdt" + self.panel_id, default) == "on"

    # Titles and content
    @property
    def nav_title(self):
        """
        Title shown in the side bar. Defaults to :attr:`title`.
        """
        return self.title

    @property
    def nav_subtitle(self):
        """
        Subtitle shown in the side bar. Defaults to the empty string.
        """
        return ""

    @property
    def has_content(self):
        """
        ``True`` if the panel can be displayed in full screen, ``False`` if
        it's only shown in the side bar. Defaults to ``True``.
        """
        return True

    @property
    def is_historical(self):
        """
        Panel supports rendering historical values.

        Defaults to :attr:`has_content`.
        """
        return self.has_content

    @property
    def title(self):
        """
        Title shown in the panel when it's displayed in full screen.

        Mandatory, unless the panel sets :attr:`has_content` to ``False``.
        """
        raise NotImplementedError

    @property
    def template(self):
        """
        Template used to render :attr:`content`.

        Mandatory, unless the panel sets :attr:`has_content` to ``False`` or
        overrides :attr:`content`.
        """
        raise NotImplementedError

    @property
    def content(self):
        """
        Content of the panel when it's displayed in full screen.

        By default this renders the template defined by :attr:`template`.
        Statistics stored with :meth:`record_stats` are available in the
        template's context. Returns ``None`` when :attr:`has_content` is
        ``False``.
        """
        if self.has_content:
            return render_to_string(self.template, self.get_stats())

    @property
    def scripts(self):
        """
        Scripts used by the HTML content of the panel when it's displayed.

        When a panel is rendered on the frontend, the ``djdt.panel.render``
        JavaScript event will be dispatched. The scripts can listen for
        this event to support dynamic functionality.
        """
        return []

    # URLs for panel-specific views
    @classmethod
    def get_urls(cls):
        """
        Return URLpatterns, if the panel has its own views.
        """
        return []

    # Enable and disable (expensive) instrumentation, must be idempotent
    def enable_instrumentation(self):
        """
        Enable instrumentation to gather data for this panel.

        This usually means monkey-patching (!) or registering signal
        receivers. Any instrumentation with a non-negligible effect on
        performance should be installed by this method rather than at import
        time.

        Unless the toolbar or this panel is disabled, this method will be
        called early in ``DebugToolbarMiddleware``. It should be idempotent.
        """

    def disable_instrumentation(self):
        """
        Disable instrumentation to gather data for this panel.

        This is the opposite of :meth:`enable_instrumentation`.

        Unless the toolbar or this panel is disabled, this method will be
        called late in the middleware. It should be idempotent.
        """

    # Store and retrieve stats (shared between panels for no good reason)
    def record_stats(self, stats):
        """
        Store data gathered by the panel. ``stats`` is a :class:`dict`.

        Each call to ``record_stats`` updates the statistics dictionary.
        """
        self.toolbar.stats.setdefault(self.panel_id, {}).update(stats)

    def get_stats(self):
        """
        Access data stored by the panel. Returns a :class:`dict`.
        """
        return self.toolbar.stats.get(self.panel_id, {})

    def record_server_timing(self, key, title, value):
        """
        Store a Server-Timing metric gathered by the panel.

        ``key`` names the metric within this panel, ``title`` is its
        human-readable description and ``value`` its duration. Each call
        updates the panel's server-timing dictionary.
        """
        data = {key: {"title": title, "value": value}}
        self.toolbar.server_timing_stats.setdefault(self.panel_id, {}).update(data)

    def get_server_timing_stats(self):
        """
        Access Server-Timing data stored by the panel. Returns a
        :class:`dict`.
        """
        return self.toolbar.server_timing_stats.get(self.panel_id, {})

    # Standard middleware methods
    def process_request(self, request):
        """
        Like __call__ in Django's middleware.

        Write panel logic related to the request there. Save data with
        :meth:`record_stats`.

        Return the existing response or overwrite it.
        """
        return self.get_response(request)

    def get_headers(self, request):
        """
        Get headers the panel needs to set.

        By default this builds a ``Server-Timing`` header from the
        metrics recorded via :meth:`record_server_timing`. Header values
        will be appended if multiple panels need to set it.

        Return dict of headers to be appended.
        """
        headers = {}
        stats = self.get_server_timing_stats()
        if stats:
            headers["Server-Timing"] = ", ".join(
                # example: `SQLPanel_sql_time;dur=0;desc="SQL 0 queries"`
                '{}_{};dur={};desc="{}"'.format(
                    self.panel_id, key, record.get("value"), record.get("title")
                )
                for key, record in stats.items()
            )
        return headers

    def generate_stats(self, request, response):
        """
        Write panel logic related to the response there. Post-process data
        gathered while the view executed. Save data with :meth:`record_stats`.

        Called after :meth:`process_request
        <debug_toolbar.panels.Panel.process_request>`.

        Does not return a value.
        """

    def generate_server_timing(self, request, response):
        """
        Similar to :meth:`generate_stats
        <debug_toolbar.panels.Panel.generate_stats>`, but for Server
        Timing metrics (https://w3c.github.io/server-timing/), stored
        with :meth:`record_server_timing`.

        Does not return a value.
        """

    @classmethod
    def run_checks(cls):
        """
        Check that the integration is configured correctly for the panel.

        This will be called as a part of the Django checks system when the
        application is being setup.

        Return a list of :class:`django.core.checks.CheckMessage` instances.
        """
        return []
| |
#!/usr/bin/env python
# FIXME: Upgrade dependencies to Python3
from __future__ import print_function
import sys, os, shutil, re, argparse
import acf
import distutils.dir_util, distutils.file_util
# FIXME: Determine this from Steam
# Default locations of the main (authoritative) Steam libraries.
main_libraries_paths = [
    '/cygdrive/c/Steam',
    '/cygdrive/d/SteamLibrary',
    '/cygdrive/e/SteamLibrary',
    '/cygdrive/s/SteamLibrary',
]
# FIXME: Pass in with command line or read from config file
# Staging library that games needing an update are copied to and from.
update_required_library_path = '/cygdrive/g/SteamLibrary'
# http://forums.steampowered.com/forums/showthread.php?t=2952766
class AppState(object):
    """Bit flags for the ``StateFlags`` field of a Steam appmanifest
    (.acf) file; see the forum thread referenced above."""
    Invalid = 0x000000
    Uninstalled = 0x000001
    UpdateRequired = 0x000002
    FullyInstalled = 0x000004
    Encrypted = 0x000008
    Locked = 0x000010
    FilesMissing = 0x000020
    AppRunning = 0x000040
    FilesCorrupt = 0x000080
    UpdateRunning = 0x000100
    UpdatePaused = 0x000200
    UpdateStarted = 0x000400
    Uninstalling = 0x000800
    BackupRunning = 0x001000
    Reconfiguring = 0x010000
    Validating = 0x020000
    AddingFiles = 0x040000
    Preallocating = 0x080000
    Downloading = 0x100000
    Staging = 0x200000
    Committing = 0x400000
    UpdateStopping = 0x800000
    # Any others?
    # Flags that mark an app as needing to be copied to the
    # update-required library (see synchronise_update_required).
    CopyToUpdateRequired = UpdateRequired | FilesMissing | FilesCorrupt
# FIXME: Using UNIX paths here!
class App(object):
    """A single installed Steam app, backed by one appmanifest (.acf)
    file.

    On construction the app registers itself in the module-level
    ``app_names`` index, and in ``apps`` unless ``add_to_apps`` is
    false.
    """

    def __init__(self, acf_file, library, add_to_apps):
        self.acf_file = acf_file
        self.library = library
        self.status = acf.parse_acf(acf_file)
        # Sanity check that the acf file appears to be valid (these
        # raise KeyError otherwise) before we add it to any lists:
        self.appid
        self.name
        if self.appid in app_names and app_names[self.appid] != self.name:
            print('{} in multiple libraries with different names: "{}", "{}"'.format(self.appid, app_names[self.appid], self.name))
        app_names[self.appid] = self.name
        if add_to_apps:
            if self.appid not in apps:
                apps[self.appid] = []
            apps[self.appid].append(self)

    @property
    def appid(self):
        """The app's numeric id (as a string) from the manifest."""
        # Steam has used both capitalisations of this key over time.
        # Bug fix: these were bare "except:" clauses that swallowed any
        # error, not just a missing key.
        try:
            return self.status['AppState']['appID']
        except KeyError:
            return self.status['AppState']['appid']

    @property
    def name(self):
        """The app's display name, falling back to UserConfig or the
        app id; returns None if nothing can be determined."""
        try:
            return self.status['AppState']['name']
        except KeyError:
            try:
                return self.status['AppState']['UserConfig']['name']
            except KeyError:
                try:
                    return self.appid
                except KeyError as e:
                    print("Unable to identify app name. Missing key {}. Contents: {}".format(str(e), self.status))
                    return None

    @property
    def install_dir(self):
        # Directory name under SteamApps/common as recorded by Steam.
        return self.status['AppState']['installdir']

    @property
    def path(self):
        """Path of the game's installation directory in its library."""
        return os.path.join(self.library.game_path, self.install_dir)

    @property
    def acf_path(self):
        """Path of the manifest inside its library.

        NOTE(review): ``acf_file`` is already an absolute path when
        created by Library, so os.path.join returns it unchanged --
        confirm intent.
        """
        return os.path.join(self.library.acf_path, self.acf_file)

    @property
    def state_flags(self):
        # Bitmask of AppState.* flags.
        return int(self.status['AppState']['StateFlags'])

    @property
    def last_updated(self):
        # Timestamp (integer) Steam recorded for the last update.
        return int(self.status['AppState']['LastUpdated'])
# Global indices populated as App objects are constructed:
# appid -> [App, ...], one entry per library where add_to_apps was true.
apps = {}
# appid -> display name.
app_names = {}
class Library(dict):
    """A Steam library folder: maps appid -> App for every
    ``appmanifest_*.acf`` file found in its SteamApps directory.

    ``add_to_apps`` is forwarded to App and controls whether the apps
    are registered in the global ``apps`` index.
    """

    def __init__(self, path, add_to_apps=True):
        self.path = path
        steam_apps = os.path.join(path, 'SteamApps')
        # Match manifests case-insensitively; the dot is escaped (it was
        # previously an "any character" wildcard by mistake).
        pattern = re.compile(r'^appmanifest_[0-9]+\.acf$', re.IGNORECASE)
        for file_name in os.listdir(steam_apps):
            if not pattern.match(file_name):
                continue
            acf_file = os.path.join(steam_apps, file_name)
            try:
                app = App(acf_file, self, add_to_apps)
            except KeyError as e:
                # Skip manifests missing required fields; App/parse_acf
                # signal these with KeyError.
                print('{} missing key {}'.format(acf_file, str(e)))
                continue
            self[app.appid] = app

    @property
    def acf_path(self):
        """Directory containing this library's appmanifest files."""
        return os.path.join(self.path, 'SteamApps')

    @property
    def game_path(self):
        """Directory containing this library's game install dirs."""
        return os.path.join(self.path, 'SteamApps', 'common')
def parse_libraries():
    """Load every configured library into the module-level globals.

    Populates ``main_libraries``, ``update_required_library``,
    ``all_libraries`` and ``new_games_library`` from the parsed
    command-line ``args``.
    """
    global main_libraries
    global update_required_library
    global all_libraries
    global new_games_library
    print('Loading libraries...')
    # Bug fix for the planned Python 3 upgrade: map() returns a lazy
    # iterator there, which breaks the '+' and indexing uses below.
    # list() is a no-op change under Python 2.
    main_libraries = list(map(Library, args.library))
    if args.updates_library:
        update_required_library = Library(args.updates_library, False)
        all_libraries = main_libraries + [update_required_library]
    else:
        update_required_library = None
        all_libraries = main_libraries
    if args.copy_new_games_to:
        # Explicit destination for newly seen games.
        new_games_library = main_libraries[args.library.index(args.copy_new_games_to)]
    elif len(main_libraries) == 1:
        # Only one library -- new games can only go there.
        new_games_library = main_libraries[0]
    else:
        new_games_library = None
def check_duplicates():
    """Report app ids that are installed in more than one main library."""
    print('Checking for AppIDs installed in multiple libraries...')
    duplicates = {}
    # Compare every unordered pair of main libraries exactly once.
    for idx, first in enumerate(main_libraries):
        for second in main_libraries[idx + 1:]:
            for appid in set(first).intersection(second):
                paths = duplicates.setdefault(appid, set())
                paths.add(first.path)
                paths.add(second.path)
    if duplicates:
        for appid in duplicates:
            print(' App ID {} ({}) found in: {}'.format(appid, app_names[appid], ', '.join(sorted(duplicates[appid]))))
def check_app_dirs():
    """Warn about manifests whose installdir contains path separators or
    points at a directory that does not exist (optionally removing the
    manifest in the latter case)."""
    print('\nChecking for bad or missing install dirs...')
    for library in all_libraries:
        for appid, app in library.iteritems():
            installdir = app.install_dir
            if '/' in installdir or '\\' in installdir:
                # Path separators mean the manifest stores a full path
                # instead of a plain directory name.
                print(' App ID {} ({}) in {} specifies absolute installdir:'.format(appid, app_names[appid], library.path))
                print(' "{}"'.format(installdir))
                if not os.path.isdir(installdir):
                    print(" ... and it's missing")
                continue
            # TODO: Check for matches with differing case
            full_path = os.path.join(library.game_path, installdir)
            if os.path.isdir(full_path):
                continue
            print(' App ID {} ({}) in {} missing installation directory:'.format(appid, app_names[appid], library.path))
            print(' "{}"'.format(full_path))
            if args.remove_acf_for_missing_games:
                print(' Removing {}'.format(app.acf_path))
                os.remove(app.acf_path)
def check_untracked_directories():
    """List (and optionally delete) directories in each library's game
    folder that no manifest references. Comparison is case-insensitive."""
    print('\nChecking for untracked game directories...')
    for library in all_libraries:
        tracked_dirs = {app.install_dir.lower()
                        for app in library.itervalues()}
        actual_dirs = {name.lower()
                       for name in os.listdir(library.game_path)}
        for untracked in actual_dirs - tracked_dirs:
            path = os.path.join(library.game_path, untracked)
            print(' Untracked directory: {}'.format(path), end='')
            if args.remove_untracked:
                print(' Removing...', end='')
                sys.stdout.flush()
                shutil.rmtree(path)
                print(' Done.', end='')
            print()
def synchronise_update_required():
    """Copy every app flagged as needing an update from the main
    libraries into the update-required library (game directory plus
    manifest), skipping apps already present there."""
    print('\nSynchronising update library...')
    for library in main_libraries:
        for appid, app in library.iteritems():
            # Only apps with an update/missing/corrupt flag set qualify.
            if (app.state_flags & AppState.CopyToUpdateRequired) == 0:
                continue
            print('\n {} StateFlags = {}'.format(app.name, app.state_flags))
            if appid in update_required_library:
                print(' {} already in {}'.format(app.name, update_required_library.path))
                continue
            game_dir = app.install_dir
            source = os.path.join(library.game_path, game_dir)
            dest = os.path.join(update_required_library.game_path, game_dir)
            # Manifest filename is lower-cased here -- presumably for
            # case-insensitive filesystems; TODO confirm.
            acf_basename = os.path.basename(app.acf_file).lower()
            acf_dest = os.path.join(update_required_library.acf_path, acf_basename)
            print(' Copying {} to {}'.format(app.name, dest))
            # FIXME: May need to merge existing trees)
            # TODO: If we have all the mounted manifest files, use
            # them to copy only the files that are known to Steam
            try:
                shutil.copytree(source, dest)
                shutil.copy(app.acf_file, acf_dest)
            except Exception as e:
                # Best-effort: report and carry on with the next app.
                print(' {} occurred while copying {}: {}'.format(e.__class__.__name__, app.name, str(e)))
def synchronise_game(app, dest_path, dest_acf_path):
    """Copy *app*'s installed files and its acf file onto the given
    destination paths, overwriting only out-of-date files."""
    print('\n  Copying {} ({}) to {}...'.format(app.name, app.appid, dest_path))
    # None of the built in copy method in Python are exactly what I
    # want. This will do for now, but eventually I'd like to use my
    # own logic to decide which files to update - if the filesize
    # or SHA1 differs (either obtained from a manifest file or
    # reading the file) it should be updated. If all manifest files
    # are present we can also skip (or even remove) untracked files.
    # update=1 makes copy_tree only replace files with newer sources.
    distutils.dir_util.copy_tree(app.path, dest_path, update=1)
    distutils.file_util.copy_file(app.acf_path, dest_acf_path)
def synchronise_update_required_reverse():
    """Copy fully-installed games from the update library back into the
    main library they came from (or into the new-games library)."""
    print('\nSynchronising back updates...')
    for appid, app in update_required_library.iteritems():
        # Only sync back apps that finished updating.
        if app.state_flags != AppState.FullyInstalled:
            continue
        if appid not in apps:
            # App is not installed in any main library: optionally treat
            # it as a brand new game.
            if new_games_library:
                dest_path = os.path.join(new_games_library.game_path, app.install_dir)
                acf_basename = os.path.basename(app.acf_file).lower()
                dest_acf_path = os.path.join(new_games_library.acf_path, acf_basename)
                synchronise_game(app, dest_path, dest_acf_path)
                continue
            print('\n  App ID {} ({}) not found in any main library, not synchronising!'.format(appid, app.name))
            continue
        # Ambiguous destination: same app installed in several libraries.
        if len(apps[appid]) != 1:
            print('\n  App ID {} ({}) in multiple main libraries, not synchronising!'.format(appid, app.name))
            continue
        installed = apps[appid][0]
        if installed.status == app.status:
            # print('\n  {} ({}) is up to date'.format(appid, app.name))
            continue
        # Never clobber a local install that is newer than the update copy.
        if installed.last_updated >= app.last_updated:
            print('\n  Local install of app {} ({}) is more recent, not synchronising!'.format(appid, app.name))
            continue
        # TODO: Do this safely if Steam is running. Not sure what the
        # best option is for that - if nothing else I might be able to
        # rename the target directory, tell Steam to uninstall it, copy
        # the files, rename it back then restart Steam.
        synchronise_game(app, installed.path, installed.acf_path)
def parse_args():
    """Parse command line options into the module-global ``args``.

    When neither library option is given, the module-level default paths
    are used.  When no action flag is given, --check is implied, and an
    updates library additionally implies both sync directions.
    """
    global args
    parser = argparse.ArgumentParser(description = 'Steam library manager')
    parser.add_argument('-l', '--library', action='append',
            help='Location of a regular Steam library to process, can specify multiple times')
    parser.add_argument('-U', '--updates-library',
            help='A special library that is intended for games that require updates, such as a library on a portable hard drive.')
    parser.add_argument('--check', action='store_true',
            help='Check the libraries for common problems')
    parser.add_argument('--copy-update-required', action='store_true',
            help='Copy any games that require updates to the library specified by --updates-library')
    parser.add_argument('--sync-updated', action='store_true',
            help='Copy any games that have been updated in the library specified by --updates-library back to the main library')
    parser.add_argument('--copy-new-games-to',
            help='With --sync-updated, copy games from the updates library to this library')
    parser.add_argument('--remove-untracked', action='store_true',
            help='Remove untracked directories from libraries (USE WITH CAUTION)')
    parser.add_argument('--remove-acf-for-missing-games', action='store_true',
            # Fixed typo: "non-existant" -> "non-existent".
            help='Remove acf files that list non-existent installation directories (CAUTION: MAY REMOVE APPS CURRENTLY BEING INSTALLED)')
    args = parser.parse_args()
    # TODO: Replace with config file
    if not args.library and not args.updates_library:
        args.library = main_libraries_paths
        args.updates_library = update_required_library_path
    # No explicit action requested: default to --check, and if an updates
    # library is configured also enable both sync directions.
    if not args.check and \
       not args.copy_update_required and \
       not args.sync_updated:
        args.check = True
        if args.updates_library:
            args.copy_update_required = True
            args.sync_updated = True
def main():
    """Script entry point: parse options, load libraries, run the
    requested actions in order."""
    parse_args()
    parse_libraries()
    if args.check:
        check_duplicates()
        check_app_dirs()
        # TODO: check_stale_downloads()
    # --remove-untracked needs the untracked scan even without --check.
    if args.check or args.remove_untracked:
        check_untracked_directories()
    if args.copy_update_required:
        synchronise_update_required()
    if args.sync_updated:
        synchronise_update_required_reverse()
# Standard script entry guard.
if __name__ == '__main__':
    main()
# vi: et ts=4:sw=4
| |
"""
Assorted utilities for use in tests.
"""
import cmath
import contextlib
import errno
import math
import os
import sys
import tempfile
import numpy as np
from numba import config, errors, typing, utils, numpy_support
from numba.compiler import compile_extra, compile_isolated, Flags, DEFAULT_FLAGS
from numba.targets import cpu
import numba.unittest_support as unittest
from numba.runtime import rtsys
# Commonly used compilation flag sets, shared by many tests.
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
# Default (nopython-capable) flags: nothing set.
no_pyobj_flags = Flags()
# Decorator for tests that need Numpy >= 1.7 features.
is_on_numpy_16 = numpy_support.version == (1, 6)
skip_on_numpy_16 = unittest.skipIf(is_on_numpy_16,
                                   "test requires Numpy 1.7 or later")
class CompilationCache(object):
    """
    Memoize compilation results keyed on (func, args, return_type, flags),
    making repeated compilations in tests significantly cheaper.
    """

    def __init__(self):
        self.typingctx = typing.Context()
        self.targetctx = cpu.CPUContext(self.typingctx)
        self.cr_cache = {}

    def compile(self, func, args, return_type=None, flags=DEFAULT_FLAGS):
        """
        Return the compile result for the given signature and flags,
        compiling it on first request and caching it afterwards.
        """
        key = (func, args, return_type, flags)
        if key not in self.cr_cache:
            self.cr_cache[key] = compile_extra(
                self.typingctx, self.targetctx, func,
                args, return_type, flags, locals={})
        return self.cr_cache[key]
class TestCase(unittest.TestCase):
    """Base test case providing precise-comparison helpers and assorted
    utilities for Numba's test suite."""

    longMessage = True

    # A random state yielding the same random numbers for any test case.
    # Use as `self.random.<method name>`
    @utils.cached_property
    def random(self):
        return np.random.RandomState(42)

    def reset_module_warnings(self, module):
        """
        Reset the warnings registry of a module. This can be necessary
        as the warnings module is buggy in that regard.
        See http://bugs.python.org/issue4180
        """
        if isinstance(module, str):
            module = sys.modules[module]
        try:
            del module.__warningregistry__
        except AttributeError:
            pass

    @contextlib.contextmanager
    def assertTypingError(self):
        """
        A context manager that asserts the enclosed code block fails
        compiling in nopython mode.
        """
        _accepted_errors = (errors.LoweringError, errors.TypingError,
                            TypeError, NotImplementedError)
        with self.assertRaises(_accepted_errors) as cm:
            yield cm

    @contextlib.contextmanager
    def assertRefCount(self, *objects):
        """
        A context manager that asserts the given objects have the
        same reference counts before and after executing the
        enclosed blocks.
        """
        old_refcounts = [sys.getrefcount(x) for x in objects]
        yield
        new_refcounts = [sys.getrefcount(x) for x in objects]
        for old, new, obj in zip(old_refcounts, new_refcounts, objects):
            if old != new:
                self.fail("Refcount changed from %d to %d for object: %r"
                          % (old, new, obj))

    # Type families used by _detect_family().  Each list entry is a type
    # or tuple of types suitable for isinstance().
    # NOTE: fixed `(utils.text_type)` and `(np.inexact)` to be real
    # 1-tuples; `(x)` is just `x`, not a tuple.  isinstance() accepts
    # both forms, so behavior is unchanged.
    _exact_typesets = [(bool, np.bool_), utils.INT_TYPES, (str,),
                       (np.integer,), (utils.text_type,)]
    _approx_typesets = [(float,), (complex,), (np.inexact,)]
    _sequence_typesets = [(tuple, list)]
    _float_types = (float, np.floating)
    _complex_types = (complex, np.complexfloating)

    def _detect_family(self, numeric_object):
        """
        This function returns a string description of the type family
        that the object in question belongs to.  Possible return values
        are: "exact", "complex", "approximate", "sequence", and "unknown"
        """
        if isinstance(numeric_object, np.ndarray):
            return "ndarray"
        for tp in self._sequence_typesets:
            if isinstance(numeric_object, tp):
                return "sequence"
        for tp in self._exact_typesets:
            if isinstance(numeric_object, tp):
                return "exact"
        for tp in self._complex_types:
            if isinstance(numeric_object, tp):
                return "complex"
        for tp in self._approx_typesets:
            if isinstance(numeric_object, tp):
                return "approximate"
        return "unknown"

    def _fix_dtype(self, dtype):
        """
        Fix the given *dtype* for comparison.
        """
        # Under 64-bit Windows, Numpy may return either int32 or int64
        # arrays depending on the function.
        if (sys.platform == 'win32' and sys.maxsize > 2**32 and
            dtype == np.dtype('int32')):
            return np.dtype('int64')
        else:
            return dtype

    def _fix_strides(self, arr):
        """
        Return the strides of the given array, fixed for comparison.
        Strides for 0- or 1-sized dimensions are ignored.
        """
        return [stride / arr.itemsize
                for (stride, shape) in zip(arr.strides, arr.shape)
                if shape > 1]

    def assertStridesEqual(self, first, second):
        """
        Test that two arrays have the same shape and strides.
        """
        self.assertEqual(first.shape, second.shape, "shapes differ")
        self.assertEqual(first.itemsize, second.itemsize, "itemsizes differ")
        self.assertEqual(self._fix_strides(first), self._fix_strides(second),
                         "strides differ")

    def assertPreciseEqual(self, first, second, prec='exact', ulps=1,
                           msg=None):
        """
        Test that two scalars have similar types and are equal up to
        a computed precision.
        If the scalars are instances of exact types or if *prec* is
        'exact', they are compared exactly.
        If the scalars are instances of inexact types (float, complex)
        and *prec* is not 'exact', then the number of significant bits
        is computed according to the value of *prec*: 53 bits if *prec*
        is 'double', 24 bits if *prec* is single. This number of bits
        can be lowered by raising the *ulps* value.
        Any value of *prec* other than 'exact', 'single' or 'double'
        will raise an error.
        """
        try:
            self._assertPreciseEqual(first, second, prec, ulps, msg)
        except AssertionError as exc:
            failure_msg = str(exc)
            # Fall off of the 'except' scope to avoid Python 3 exception
            # chaining.
        else:
            return
        # Decorate the failure message with more information
        self.fail("when comparing %s and %s: %s" % (first, second, failure_msg))

    def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
                            msg=None):
        """Recursive workhorse for assertPreciseEqual()."""

        def _assertNumberEqual(first, second, delta=None):
            # Exact comparison for identical values, infinities, or when
            # no tolerance was computed.
            if (delta is None or first == second == 0.0
                or math.isinf(first) or math.isinf(second)):
                self.assertEqual(first, second, msg=msg)
                # For signed zeros
                try:
                    if math.copysign(1, first) != math.copysign(1, second):
                        self.fail(
                            self._formatMessage(msg,
                                                "%s != %s" % (first, second)))
                except TypeError:
                    pass
            else:
                self.assertAlmostEqual(first, second, delta=delta, msg=msg)

        first_family = self._detect_family(first)
        second_family = self._detect_family(second)
        assertion_message = "Type Family mismatch. (%s != %s)" % (first_family, second_family)
        if msg:
            assertion_message += ': %s' % (msg,)
        self.assertEqual(first_family, second_family, msg=assertion_message)

        # We now know they are in the same comparison family
        compare_family = first_family

        # For recognized sequences, recurse
        if compare_family == "ndarray":
            dtype = self._fix_dtype(first.dtype)
            self.assertEqual(dtype, self._fix_dtype(second.dtype))
            self.assertEqual(first.ndim, second.ndim,
                             "different number of dimensions")
            self.assertEqual(first.shape, second.shape,
                             "different shapes")
            self.assertEqual(first.flags.writeable, second.flags.writeable,
                             "different mutability")
            # itemsize is already checked by the dtype test above
            self.assertEqual(self._fix_strides(first), self._fix_strides(second),
                             "different strides")
            if first.dtype != dtype:
                first = first.astype(dtype)
            if second.dtype != dtype:
                second = second.astype(dtype)
            for a, b in zip(first.flat, second.flat):
                self._assertPreciseEqual(a, b, prec, ulps, msg)
            return

        if compare_family == "sequence":
            self.assertEqual(len(first), len(second), msg=msg)
            for a, b in zip(first, second):
                self._assertPreciseEqual(a, b, prec, ulps, msg)
            return

        if compare_family == "exact":
            exact_comparison = True

        if compare_family in ["complex", "approximate"]:
            exact_comparison = False

        if compare_family == "unknown":
            # Assume these are non-numeric types: we will fall back
            # on regular unittest comparison.
            self.assertIs(first.__class__, second.__class__)
            exact_comparison = True

        # If a Numpy scalar, check the dtype is exactly the same too
        # (required for datetime64 and timedelta64).
        if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
            self.assertEqual(first.dtype, second.dtype)

        try:
            if cmath.isnan(first) and cmath.isnan(second):
                # The NaNs will compare unequal, skip regular comparison
                return
        except TypeError:
            # Not floats.
            pass

        exact_comparison = exact_comparison or prec == 'exact'

        if not exact_comparison and prec != 'exact':
            # Derive an absolute tolerance from the requested precision
            # and the magnitude of the operands.
            if prec == 'single':
                bits = 24
            elif prec == 'double':
                bits = 53
            else:
                raise ValueError("unsupported precision %r" % (prec,))
            k = 2 ** (ulps - bits - 1)
            delta = k * (abs(first) + abs(second))
        else:
            delta = None
        if isinstance(first, self._complex_types):
            _assertNumberEqual(first.real, second.real, delta)
            _assertNumberEqual(first.imag, second.imag, delta)
        else:
            _assertNumberEqual(first, second, delta)

    def run_nullary_func(self, pyfunc, flags):
        """
        Compile the 0-argument *pyfunc* with the given *flags*, and check
        it returns the same result as the pure Python function.
        The got and expected results are returned.
        """
        cr = compile_isolated(pyfunc, (), flags=flags)
        cfunc = cr.entry_point
        expected = pyfunc()
        got = cfunc()
        self.assertPreciseEqual(got, expected)
        return got, expected
# Various helpers
@contextlib.contextmanager
def override_config(name, value):
    """
    Temporarily set numba.config.<name> to *value* for the duration of
    the ``with`` block; the previous value is restored on exit, even if
    the block raises.
    """
    saved = getattr(config, name)
    setattr(config, name, value)
    try:
        yield
    finally:
        setattr(config, name, saved)
def compile_function(name, code, globs):
    """
    Execute the source string *code* against the globals *globs* and
    return the resulting function object bound to *name*.
    """
    namespace = {}
    code_obj = compile(code, "<string>", "exec")
    # eval() of an exec-mode code object runs it (py2/py3 compatible).
    eval(code_obj, globs, namespace)
    return namespace[name]
def tweak_code(func, codestring=None, consts=None):
    """
    Tweak the code object of the given function by replacing its
    *codestring* (a bytes object) and *consts* tuple, optionally.
    """
    co = func.__code__
    tp = type(co)
    if codestring is None:
        codestring = co.co_code
    if consts is None:
        consts = co.co_consts
    # NOTE(review): these positional CodeType constructor calls match
    # older CPython signatures (py2 and early py3); newer versions added
    # co_posonlyargcount etc. -- confirm the supported Python range.
    if sys.version_info >= (3,):
        new_code = tp(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
                      co.co_stacksize, co.co_flags, codestring,
                      consts, co.co_names, co.co_varnames,
                      co.co_filename, co.co_name, co.co_firstlineno,
                      co.co_lnotab)
    else:
        # Python 2 CodeType has no co_kwonlyargcount.
        new_code = tp(co.co_argcount, co.co_nlocals,
                      co.co_stacksize, co.co_flags, codestring,
                      consts, co.co_names, co.co_varnames,
                      co.co_filename, co.co_name, co.co_firstlineno,
                      co.co_lnotab)
    func.__code__ = new_code
def static_temp_directory(dirname):
    """
    Return (creating it if needed) a directory called *dirname* inside
    the system temp dir.  Statically-named temp dirs are needed because
    we can't delete a DLL under Windows (a bit fragile if stale files
    can influence the result of future test runs).
    """
    target = os.path.join(tempfile.gettempdir(), dirname)
    try:
        os.mkdir(target)
    except OSError as exc:
        # Already existing is fine; anything else is a real error.
        if exc.errno != errno.EEXIST:
            raise
    return target
# From CPython
@contextlib.contextmanager
def captured_output(stream_name):
    """Temporarily replace sys.<stream_name> with a StringIO and yield it;
    the original stream is restored when the block exits."""
    saved = getattr(sys, stream_name)
    setattr(sys, stream_name, utils.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, saved)
def captured_stdout():
    """Capture sys.stdout for the duration of a with-block:

       with captured_stdout() as stdout:
           print("hello")
       self.assertEqual(stdout.getvalue(), "hello\n")
    """
    return captured_output("stdout")
def captured_stderr():
    """Capture sys.stderr for the duration of a with-block:

       with captured_stderr() as stderr:
           print("hello", file=sys.stderr)
       self.assertEqual(stderr.getvalue(), "hello\n")
    """
    return captured_output("stderr")
class MemoryLeak(object):
    """Mixin checking NRT allocation counters balance over a test.

    Must be mixed into a unittest.TestCase (it calls self.assertEqual).
    """

    # Class-level default; disable_leak_check() shadows it with an
    # instance attribute (name-mangled to _MemoryLeak__enable_leak_check).
    __enable_leak_check = True

    def memory_leak_setup(self):
        # Snapshot the runtime allocation counters before the test body.
        self.__init_stats = rtsys.get_allocation_stats()

    def memory_leak_teardown(self):
        # Fail the test if allocations/frees (and meminfo alloc/free)
        # did not balance since memory_leak_setup().
        if self.__enable_leak_check:
            old = self.__init_stats
            new = rtsys.get_allocation_stats()
            total_alloc = new.alloc - old.alloc
            total_free = new.free - old.free
            total_mi_alloc = new.mi_alloc - old.mi_alloc
            total_mi_free = new.mi_free - old.mi_free
            self.assertEqual(total_alloc, total_free)
            self.assertEqual(total_mi_alloc, total_mi_free)

    def disable_leak_check(self):
        # For per-test use when MemoryLeakMixin is injected into a TestCase
        self.__enable_leak_check = False
class MemoryLeakMixin(MemoryLeak):
    """Hooks the MemoryLeak checks into unittest's setUp/tearDown."""

    def setUp(self):
        super(MemoryLeakMixin, self).setUp()
        self.memory_leak_setup()

    def tearDown(self):
        super(MemoryLeakMixin, self).tearDown()
        self.memory_leak_teardown()
| |
import unittest
import tkinter
from tkinter import ttk, TclError
from test.support import requires
import sys
from tkinter.test.test_ttk.test_functions import MockTclObj
from tkinter.test.support import (AbstractTkTest, tcl_version, get_tk_patchlevel,
simulate_mouse_click)
from tkinter.test.widget_tests import (add_standard_options, noconv,
AbstractWidgetTest, StandardOptionsTests, IntegerSizeTests, PixelSizeTests,
setUpModule)
# Skip the whole module when no GUI is available (e.g. headless CI).
requires('gui')
class StandardTtkOptionsTests(StandardOptionsTests):
    """Checks for options shared by all ttk widgets ('class', 'padding',
    'style').  Subclasses supply create()."""

    def test_class(self):
        widget = self.create()
        self.assertEqual(widget['class'], '')
        # Tk changed the error message casing in 8.6b3.
        errmsg='attempt to change read-only option'
        if get_tk_patchlevel() < (8, 6, 0, 'beta', 3):
            errmsg='Attempt to change read-only option'
        self.checkInvalidParam(widget, 'class', 'Foo', errmsg=errmsg)
        widget2 = self.create(class_='Foo')
        self.assertEqual(widget2['class'], 'Foo')

    def test_padding(self):
        widget = self.create()
        self.checkParam(widget, 'padding', 0, expected=('0',))
        self.checkParam(widget, 'padding', 5, expected=('5',))
        self.checkParam(widget, 'padding', (5, 6), expected=('5', '6'))
        self.checkParam(widget, 'padding', (5, 6, 7),
                        expected=('5', '6', '7'))
        self.checkParam(widget, 'padding', (5, 6, 7, 8),
                        expected=('5', '6', '7', '8'))
        self.checkParam(widget, 'padding', ('5p', '6p', '7p', '8p'))
        self.checkParam(widget, 'padding', (), expected='')

    def test_style(self):
        widget = self.create()
        self.assertEqual(widget['style'], '')
        errmsg = 'Layout Foo not found'
        if hasattr(self, 'default_orient'):
            errmsg = ('Layout %s.Foo not found' %
                      getattr(self, 'default_orient').title())
        self.checkInvalidParam(widget, 'style', 'Foo',
                               errmsg=errmsg)
        # NOTE(review): the lines below re-test 'class', not 'style' --
        # they look copy-pasted from test_class; confirm intent.
        widget2 = self.create(class_='Foo')
        self.assertEqual(widget2['class'], 'Foo')
        # XXX
        pass
class WidgetTest(AbstractTkTest, unittest.TestCase):
    """Tests methods available in every ttk widget."""

    def setUp(self):
        super().setUp()
        self.widget = ttk.Button(self.root, width=0, text="Text")
        self.widget.pack()
        self.widget.wait_visibility()

    def test_identify(self):
        # identify() names the widget element at the given coordinates.
        self.widget.update_idletasks()
        self.assertEqual(self.widget.identify(
            int(self.widget.winfo_width() / 2),
            int(self.widget.winfo_height() / 2)
            ), "label")
        self.assertEqual(self.widget.identify(-1, -1), "")
        # Non-integer coordinates must raise from Tcl.
        self.assertRaises(tkinter.TclError, self.widget.identify, None, 5)
        self.assertRaises(tkinter.TclError, self.widget.identify, 5, None)
        self.assertRaises(tkinter.TclError, self.widget.identify, 5, '')

    def test_widget_state(self):
        # XXX not sure about the portability of all these tests
        self.assertEqual(self.widget.state(), ())
        self.assertEqual(self.widget.instate(['!disabled']), True)
        # changing from !disabled to disabled
        self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
        # no state change
        self.assertEqual(self.widget.state(['disabled']), ())
        # change back to !disable but also active
        self.assertEqual(self.widget.state(['!disabled', 'active']),
            ('!active', 'disabled'))
        # no state changes, again
        self.assertEqual(self.widget.state(['!disabled', 'active']), ())
        self.assertEqual(self.widget.state(['active', '!disabled']), ())
        # instate() may invoke a callback when the spec matches.
        def test_cb(arg1, **kw):
            return arg1, kw
        self.assertEqual(self.widget.instate(['!disabled'],
            test_cb, "hi", **{"msg": "there"}),
            ('hi', {'msg': 'there'}))
        # attempt to set invalid statespec
        currstate = self.widget.state()
        self.assertRaises(tkinter.TclError, self.widget.instate,
            ['badstate'])
        self.assertRaises(tkinter.TclError, self.widget.instate,
            ['disabled', 'badstate'])
        # verify that widget didn't change its state
        self.assertEqual(currstate, self.widget.state())
        # ensuring that passing None as state doesn't modify current state
        self.widget.state(['active', '!disabled'])
        self.assertEqual(self.widget.state(), ('active', ))
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
    # These widgets report size options as-is; disable the pixel
    # conversion applied by the generic size tests.
    _conv_pixels = noconv
@add_standard_options(StandardTtkOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
    """Option tests for ttk.Frame."""
    OPTIONS = (
        'borderwidth', 'class', 'cursor', 'height',
        'padding', 'relief', 'style', 'takefocus',
        'width',
    )

    def create(self, **kwargs):
        return ttk.Frame(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
    """Option tests for ttk.LabelFrame."""
    OPTIONS = (
        'borderwidth', 'class', 'cursor', 'height',
        'labelanchor', 'labelwidget',
        'padding', 'relief', 'style', 'takefocus',
        'text', 'underline', 'width',
    )

    def create(self, **kwargs):
        return ttk.LabelFrame(self.root, **kwargs)

    def test_labelanchor(self):
        widget = self.create()
        self.checkEnumParam(widget, 'labelanchor',
                'e', 'en', 'es', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w', 'wn', 'ws',
                errmsg='Bad label anchor specification {}')
        # 'center' is a valid Tk anchor but not a valid label anchor.
        self.checkInvalidParam(widget, 'labelanchor', 'center')

    def test_labelwidget(self):
        widget = self.create()
        label = ttk.Label(self.root, text='Mupp', name='foo')
        # The option is stored as the label's Tk path name.
        self.checkParam(widget, 'labelwidget', label, expected='.foo')
        label.destroy()
class AbstractLabelTest(AbstractWidgetTest):
    """Shared option tests for label-like ttk widgets (Label, Button,
    Checkbutton, ...)."""

    def checkImageParam(self, widget, name):
        # The image option accepts a single image, a statespec sequence,
        # or an equivalent space-separated string.
        image = tkinter.PhotoImage(master=self.root, name='image1')
        image2 = tkinter.PhotoImage(master=self.root, name='image2')
        self.checkParam(widget, name, image, expected=('image1',))
        self.checkParam(widget, name, 'image1', expected=('image1',))
        self.checkParam(widget, name, (image,), expected=('image1',))
        self.checkParam(widget, name, (image, 'active', image2),
                        expected=('image1', 'active', 'image2'))
        self.checkParam(widget, name, 'image1 active image2',
                        expected=('image1', 'active', 'image2'))
        self.checkInvalidParam(widget, name, 'spam',
                errmsg='image "spam" doesn\'t exist')

    def test_compound(self):
        widget = self.create()
        self.checkEnumParam(widget, 'compound',
                'none', 'text', 'image', 'center',
                'top', 'bottom', 'left', 'right')

    def test_state(self):
        widget = self.create()
        self.checkParams(widget, 'state', 'active', 'disabled', 'normal')

    def test_width(self):
        widget = self.create()
        # Negative widths mean "minimum width" for ttk label widgets.
        self.checkParams(widget, 'width', 402, -402, 0)
@add_standard_options(StandardTtkOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
    """Option tests for ttk.Label."""
    OPTIONS = (
        'anchor', 'background', 'borderwidth',
        'class', 'compound', 'cursor', 'font', 'foreground',
        'image', 'justify', 'padding', 'relief', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'width', 'wraplength',
    )
    _conv_pixels = noconv

    def create(self, **kwargs):
        return ttk.Label(self.root, **kwargs)

    def test_font(self):
        widget = self.create()
        # X-style font specification should be accepted verbatim.
        self.checkParam(widget, 'font',
                        '-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
@add_standard_options(StandardTtkOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
    """Option and behavior tests for ttk.Button."""
    OPTIONS = (
        'class', 'command', 'compound', 'cursor', 'default',
        'image', 'padding', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'width',
    )

    def create(self, **kwargs):
        return ttk.Button(self.root, **kwargs)

    def test_default(self):
        widget = self.create()
        self.checkEnumParam(widget, 'default', 'normal', 'active', 'disabled')

    def test_invoke(self):
        # invoke() must run the configured command.
        success = []
        btn = ttk.Button(self.root, command=lambda: success.append(1))
        btn.invoke()
        self.assertTrue(success)
@add_standard_options(StandardTtkOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
    """Option and behavior tests for ttk.Checkbutton."""
    OPTIONS = (
        'class', 'command', 'compound', 'cursor',
        'image',
        'offvalue', 'onvalue',
        'padding', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'variable', 'width',
    )

    def create(self, **kwargs):
        return ttk.Checkbutton(self.root, **kwargs)

    def test_offvalue(self):
        widget = self.create()
        self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')

    def test_onvalue(self):
        widget = self.create()
        self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')

    def test_invoke(self):
        success = []
        def cb_test():
            success.append(1)
            return "cb test called"
        cbtn = ttk.Checkbutton(self.root, command=cb_test)
        # the variable automatically created by ttk.Checkbutton is actually
        # undefined till we invoke the Checkbutton
        self.assertEqual(cbtn.state(), ('alternate', ))
        self.assertRaises(tkinter.TclError, cbtn.tk.globalgetvar,
            cbtn['variable'])

        res = cbtn.invoke()
        self.assertEqual(res, "cb test called")
        # First invoke toggles the variable to the onvalue.
        self.assertEqual(cbtn['onvalue'],
            cbtn.tk.globalgetvar(cbtn['variable']))
        self.assertTrue(success)

        # With an empty command, invoke() returns an empty result and
        # does not call the (removed) callback again.
        cbtn['command'] = ''
        res = cbtn.invoke()
        self.assertFalse(str(res))
        self.assertLessEqual(len(success), 1)
        self.assertEqual(cbtn['offvalue'],
            cbtn.tk.globalgetvar(cbtn['variable']))
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
    """Option and behavior tests for ttk.Entry."""
    OPTIONS = (
        'background', 'class', 'cursor',
        'exportselection', 'font', 'foreground',
        'invalidcommand', 'justify',
        'show', 'state', 'style', 'takefocus', 'textvariable',
        'validate', 'validatecommand', 'width', 'xscrollcommand',
    )

    def setUp(self):
        super().setUp()
        self.entry = self.create()

    def create(self, **kwargs):
        return ttk.Entry(self.root, **kwargs)

    def test_invalidcommand(self):
        widget = self.create()
        self.checkCommandParam(widget, 'invalidcommand')

    def test_show(self):
        widget = self.create()
        self.checkParam(widget, 'show', '*')
        self.checkParam(widget, 'show', '')
        self.checkParam(widget, 'show', ' ')

    def test_state(self):
        widget = self.create()
        self.checkParams(widget, 'state',
                         'disabled', 'normal', 'readonly')

    def test_validate(self):
        widget = self.create()
        self.checkEnumParam(widget, 'validate',
                'all', 'key', 'focus', 'focusin', 'focusout', 'none')

    def test_validatecommand(self):
        widget = self.create()
        self.checkCommandParam(widget, 'validatecommand')

    def test_bbox(self):
        self.assertIsBoundingBox(self.entry.bbox(0))
        self.assertRaises(tkinter.TclError, self.entry.bbox, 'noindex')
        self.assertRaises(tkinter.TclError, self.entry.bbox, None)

    def test_identify(self):
        self.entry.pack()
        self.entry.wait_visibility()
        self.entry.update_idletasks()
        # bpo-27313: macOS Cocoa widget differs from X, allow either
        if sys.platform == 'darwin':
            self.assertIn(self.entry.identify(5, 5),
                ("textarea", "Combobox.button") )
        else:
            self.assertEqual(self.entry.identify(5, 5), "textarea")
        self.assertEqual(self.entry.identify(-1, -1), "")
        self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
        self.assertRaises(tkinter.TclError, self.entry.identify, 5, None)
        self.assertRaises(tkinter.TclError, self.entry.identify, 5, '')

    def test_validation_options(self):
        success = []
        test_invalid = lambda: success.append(True)

        self.entry['validate'] = 'none'
        self.entry['validatecommand'] = lambda: False

        # invalidcommand fires when validatecommand returns False.
        self.entry['invalidcommand'] = test_invalid
        self.entry.validate()
        self.assertTrue(success)

        # Removing invalidcommand stops further notifications.
        self.entry['invalidcommand'] = ''
        self.entry.validate()
        self.assertEqual(len(success), 1)

        # A passing validatecommand never triggers invalidcommand.
        self.entry['invalidcommand'] = test_invalid
        self.entry['validatecommand'] = lambda: True
        self.entry.validate()
        self.assertEqual(len(success), 1)

        # An empty validatecommand validates trivially.
        self.entry['validatecommand'] = ''
        self.entry.validate()
        self.assertEqual(len(success), 1)

        # A non-callable validatecommand is a Tcl error.
        self.entry['validatecommand'] = True
        self.assertRaises(tkinter.TclError, self.entry.validate)

    def test_validation(self):
        validation = []
        def validate(to_insert):
            if not 'a' <= to_insert.lower() <= 'z':
                validation.append(False)
                return False
            validation.append(True)
            return True

        self.entry['validate'] = 'key'
        # %S substitutes the string being inserted.
        self.entry['validatecommand'] = self.entry.register(validate), '%S'

        self.entry.insert('end', 1)
        self.entry.insert('end', 'a')
        self.assertEqual(validation, [False, True])
        self.assertEqual(self.entry.get(), 'a')

    def test_revalidation(self):
        def validate(content):
            for letter in content:
                if not 'a' <= letter.lower() <= 'z':
                    return False
            return True

        # %P substitutes the prospective entry contents.
        self.entry['validatecommand'] = self.entry.register(validate), '%P'

        self.entry.insert('end', 'avocado')
        self.assertEqual(self.entry.validate(), True)
        self.assertEqual(self.entry.state(), ())

        self.entry.delete(0, 'end')
        self.assertEqual(self.entry.get(), '')

        # Failing validation sets the 'invalid' widget state.
        self.entry.insert('end', 'a1b')
        self.assertEqual(self.entry.validate(), False)
        self.assertEqual(self.entry.state(), ('invalid', ))

        self.entry.delete(1)
        self.assertEqual(self.entry.validate(), True)
        self.assertEqual(self.entry.state(), ())
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class ComboboxTest(EntryTest, unittest.TestCase):
    """Option and behavior tests for ttk.Combobox (inherits the Entry
    tests, since a Combobox embeds an entry)."""
    OPTIONS = (
        'background', 'class', 'cursor', 'exportselection',
        'font', 'foreground', 'height', 'invalidcommand',
        'justify', 'postcommand', 'show', 'state', 'style',
        'takefocus', 'textvariable',
        'validate', 'validatecommand', 'values',
        'width', 'xscrollcommand',
    )

    def setUp(self):
        super().setUp()
        self.combo = self.create()

    def create(self, **kwargs):
        return ttk.Combobox(self.root, **kwargs)

    def test_height(self):
        widget = self.create()
        self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')

    def _show_drop_down_listbox(self):
        # Simulate a click on the combobox's drop-down arrow.
        width = self.combo.winfo_width()
        self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
        self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
        self.combo.update_idletasks()

    def test_virtual_event(self):
        # Selecting a value must fire <<ComboboxSelected>>.
        success = []

        self.combo['values'] = [1]
        self.combo.bind('<<ComboboxSelected>>',
            lambda evt: success.append(True))
        self.combo.pack()
        self.combo.wait_visibility()

        height = self.combo.winfo_height()
        self._show_drop_down_listbox()
        self.combo.update()
        self.combo.event_generate('<Return>')
        self.combo.update()

        self.assertTrue(success)

    def test_postcommand(self):
        success = []

        self.combo['postcommand'] = lambda: success.append(True)
        self.combo.pack()
        self.combo.wait_visibility()

        self._show_drop_down_listbox()
        self.assertTrue(success)

        # testing postcommand removal
        self.combo['postcommand'] = ''
        self._show_drop_down_listbox()
        self.assertEqual(len(success), 1)

    def test_values(self):
        def check_get_current(getval, currval):
            self.assertEqual(self.combo.get(), getval)
            self.assertEqual(self.combo.current(), currval)

        self.assertEqual(self.combo['values'],
                         () if tcl_version < (8, 5) else '')
        check_get_current('', -1)

        self.checkParam(self.combo, 'values', 'mon tue wed thur',
                        expected=('mon', 'tue', 'wed', 'thur'))
        self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
        self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
        self.checkParam(self.combo, 'values', '')

        self.combo['values'] = ['a', 1, 'c']

        self.combo.set('c')
        check_get_current('c', 2)

        self.combo.current(0)
        check_get_current('a', 0)

        # Setting a value not in 'values' yields current() == -1.
        self.combo.set('d')
        check_get_current('d', -1)

        # testing values with empty string
        self.combo.set('')
        self.combo['values'] = (1, 2, '', 3)
        check_get_current('', 2)

        # testing values with empty string set through configure
        self.combo.configure(values=[1, '', 2])
        self.assertEqual(self.combo['values'],
                         ('1', '', '2') if self.wantobjects else
                         '1 {} 2')

        # testing values with spaces
        self.combo['values'] = ['a b', 'a\tb', 'a\nb']
        self.assertEqual(self.combo['values'],
                         ('a b', 'a\tb', 'a\nb') if self.wantobjects else
                         '{a b} {a\tb} {a\nb}')

        # testing values with special characters
        self.combo['values'] = [r'a\tb', '"a"', '} {']
        self.assertEqual(self.combo['values'],
                         (r'a\tb', '"a"', '} {') if self.wantobjects else
                         r'a\\tb {"a"} \}\ \{')

        # out of range
        self.assertRaises(tkinter.TclError, self.combo.current,
                          len(self.combo['values']))
        # it expects an integer (or something that can be converted to int)
        self.assertRaises(tkinter.TclError, self.combo.current, '')

        # testing creating combobox with empty string in values
        combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
        self.assertEqual(combo2['values'],
                         ('1', '2', '') if self.wantobjects else '1 2 {}')
        combo2.destroy()
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height',
'orient', 'style', 'takefocus', 'width',
)
    def setUp(self):
        super().setUp()
        # Fresh paned window for each test.
        self.paned = self.create()
    def create(self, **kwargs):
        # Factory used by the generic option-checking machinery.
        return ttk.PanedWindow(self.root, **kwargs)
def test_orient(self):
widget = self.create()
self.assertEqual(str(widget['orient']), 'vertical')
errmsg='attempt to change read-only option'
if get_tk_patchlevel() < (8, 6, 0, 'beta', 3):
errmsg='Attempt to change read-only option'
self.checkInvalidParam(widget, 'orient', 'horizontal',
errmsg=errmsg)
widget2 = self.create(orient='horizontal')
self.assertEqual(str(widget2['orient']), 'horizontal')
def test_add(self):
# attempt to add a child that is not a direct child of the paned window
label = ttk.Label(self.paned)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
label.destroy()
child.destroy()
# another attempt
label = ttk.Label(self.root)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
child.destroy()
label.destroy()
good_child = ttk.Label(self.root)
self.paned.add(good_child)
# re-adding a child is not accepted
self.assertRaises(tkinter.TclError, self.paned.add, good_child)
other_child = ttk.Label(self.paned)
self.paned.add(other_child)
self.assertEqual(self.paned.pane(0), self.paned.pane(1))
self.assertRaises(tkinter.TclError, self.paned.pane, 2)
good_child.destroy()
other_child.destroy()
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.paned.forget, None)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
self.paned.add(ttk.Label(self.root))
self.paned.forget(0)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
def test_insert(self):
self.assertRaises(tkinter.TclError, self.paned.insert, None, 0)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, None)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, 0)
child = ttk.Label(self.root)
child2 = ttk.Label(self.root)
child3 = ttk.Label(self.root)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, child)
self.paned.insert('end', child2)
self.paned.insert(0, child)
self.assertEqual(self.paned.panes(), (str(child), str(child2)))
self.paned.insert(0, child2)
self.assertEqual(self.paned.panes(), (str(child2), str(child)))
self.paned.insert('end', child3)
self.assertEqual(self.paned.panes(),
(str(child2), str(child), str(child3)))
# reinserting a child should move it to its current position
panes = self.paned.panes()
self.paned.insert('end', child3)
self.assertEqual(panes, self.paned.panes())
# moving child3 to child2 position should result in child2 ending up
# in previous child position and child ending up in previous child3
# position
self.paned.insert(child2, child3)
self.assertEqual(self.paned.panes(),
(str(child3), str(child2), str(child)))
def test_pane(self):
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
child = ttk.Label(self.root)
self.paned.add(child)
self.assertIsInstance(self.paned.pane(0), dict)
self.assertEqual(self.paned.pane(0, weight=None),
0 if self.wantobjects else '0')
# newer form for querying a single option
self.assertEqual(self.paned.pane(0, 'weight'),
0 if self.wantobjects else '0')
self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
self.assertRaises(tkinter.TclError, self.paned.pane, 0,
badoption='somevalue')
def test_sashpos(self):
self.assertRaises(tkinter.TclError, self.paned.sashpos, None)
self.assertRaises(tkinter.TclError, self.paned.sashpos, '')
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child = ttk.Label(self.paned, text='a')
self.paned.add(child, weight=1)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child2 = ttk.Label(self.paned, text='b')
self.paned.add(child2)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 1)
self.paned.pack(expand=True, fill='both')
self.paned.wait_visibility()
curr_pos = self.paned.sashpos(0)
self.paned.sashpos(0, 1000)
self.assertNotEqual(curr_pos, self.paned.sashpos(0))
self.assertIsInstance(self.paned.sashpos(0), int)
@add_standard_options(StandardTtkOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for the ttk.Radiobutton widget."""
    OPTIONS = (
        'class', 'command', 'compound', 'cursor',
        'image',
        'padding', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'value', 'variable', 'width',
    )

    def create(self, **kwargs):
        return ttk.Radiobutton(self.root, **kwargs)

    def test_value(self):
        widget = self.create()
        self.checkParams(widget, 'value', 1, 2.3, '', 'any string')

    def test_invoke(self):
        # invoke() must select the button's value, run its command, and
        # return that command's result.
        success = []
        def cb_test():
            success.append(1)
            return "cb test called"

        myvar = tkinter.IntVar(self.root)
        cbtn = ttk.Radiobutton(self.root, command=cb_test,
                               variable=myvar, value=0)
        cbtn2 = ttk.Radiobutton(self.root, command=cb_test,
                                variable=myvar, value=1)

        # without wantobjects, Tcl returns strings, so convert before compare
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = int

        res = cbtn.invoke()
        self.assertEqual(res, "cb test called")
        self.assertEqual(conv(cbtn['value']), myvar.get())
        self.assertEqual(myvar.get(),
            conv(cbtn.tk.globalgetvar(cbtn['variable'])))
        self.assertTrue(success)

        # with the command removed, invoke() still selects but returns ''
        cbtn2['command'] = ''
        res = cbtn2.invoke()
        self.assertEqual(str(res), '')
        self.assertLessEqual(len(success), 1)
        self.assertEqual(conv(cbtn2['value']), myvar.get())
        self.assertEqual(myvar.get(),
            conv(cbtn.tk.globalgetvar(cbtn['variable'])))

        # both buttons share the same variable
        self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for the ttk.Menubutton widget."""
    OPTIONS = (
        'class', 'compound', 'cursor', 'direction',
        'image', 'menu', 'padding', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'width',
    )

    def create(self, **kwargs):
        return ttk.Menubutton(self.root, **kwargs)

    def test_direction(self):
        widget = self.create()
        self.checkEnumParam(widget, 'direction',
                'above', 'below', 'left', 'right', 'flush')

    def test_menu(self):
        # the 'menu' option stores the menu's Tcl path name, hence conv=str
        widget = self.create()
        menu = tkinter.Menu(widget, name='menu')
        self.checkParam(widget, 'menu', menu, conv=str)
        menu.destroy()
@add_standard_options(StandardTtkOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for the ttk.Scale widget (range options, get/set clamping)."""
    OPTIONS = (
        'class', 'command', 'cursor', 'from', 'length',
        'orient', 'style', 'takefocus', 'to', 'value', 'variable',
    )
    _conv_pixels = noconv
    default_orient = 'horizontal'

    def setUp(self):
        super().setUp()
        self.scale = self.create()
        self.scale.pack()
        self.scale.update()

    def create(self, **kwargs):
        return ttk.Scale(self.root, **kwargs)

    def test_from(self):
        widget = self.create()
        self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=False)

    def test_length(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')

    def test_to(self):
        widget = self.create()
        self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10, conv=False)

    def test_value(self):
        widget = self.create()
        self.checkFloatParam(widget, 'value', 300, 14.9, 15.1, -10, conv=False)

    def test_custom_event(self):
        # changing 'from'/'to' must fire <<RangeChanged>> for every change
        failure = [1, 1, 1]  # will need to be empty
        funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())

        self.scale['from'] = 10
        self.scale['from_'] = 10
        self.scale['to'] = 3

        self.assertFalse(failure)

        failure = [1, 1, 1]
        self.scale.configure(from_=2, to=5)
        self.scale.configure(from_=0, to=-2)
        self.scale.configure(to=10)

        self.assertFalse(failure)

    def test_get(self):
        # without wantobjects, Tcl returns strings, so convert before compare
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = float

        scale_width = self.scale.winfo_width()
        self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])

        self.assertEqual(conv(self.scale.get(0, 0)), conv(self.scale['from']))
        self.assertEqual(self.scale.get(), self.scale['value'])

        self.scale['value'] = 30
        self.assertEqual(self.scale.get(), self.scale['value'])

        self.assertRaises(tkinter.TclError, self.scale.get, '', 0)
        self.assertRaises(tkinter.TclError, self.scale.get, 0, '')

    def test_set(self):
        # without wantobjects, Tcl returns strings, so convert before compare
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = float

        # set restricts the max/min values according to the current range
        # (renamed from max/min to avoid shadowing the builtins)
        max_value = conv(self.scale['to'])
        new_max = max_value + 10
        self.scale.set(new_max)
        self.assertEqual(conv(self.scale.get()), max_value)
        min_value = conv(self.scale['from'])
        self.scale.set(min_value - 1)
        self.assertEqual(conv(self.scale.get()), min_value)

        # changing directly the variable doesn't impose this limitation tho
        var = tkinter.DoubleVar(self.root)
        self.scale['variable'] = var
        var.set(max_value + 5)
        self.assertEqual(conv(self.scale.get()), var.get())
        self.assertEqual(conv(self.scale.get()), max_value + 5)
        del var

        # the same happens with the value option
        self.scale['value'] = max_value + 10
        self.assertEqual(conv(self.scale.get()), max_value + 10)
        self.assertEqual(conv(self.scale.get()), conv(self.scale['value']))

        # nevertheless, note that the max/min values we can get specifying
        # x, y coords are the ones according to the current range
        self.assertEqual(conv(self.scale.get(0, 0)), min_value)
        self.assertEqual(conv(self.scale.get(self.scale.winfo_width(), 0)),
                         max_value)

        self.assertRaises(tkinter.TclError, self.scale.set, None)
@add_standard_options(StandardTtkOptionsTests)
class ProgressbarTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for the ttk.Progressbar widget."""
    OPTIONS = (
        'class', 'cursor', 'orient', 'length',
        'mode', 'maximum', 'phase',
        'style', 'takefocus', 'value', 'variable',
    )
    _conv_pixels = noconv
    default_orient = 'horizontal'

    def create(self, **kwargs):
        return ttk.Progressbar(self.root, **kwargs)

    def test_length(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'length', 100.1, 56.7, '2i')

    def test_maximum(self):
        widget = self.create()
        self.checkFloatParam(widget, 'maximum', 150.2, 77.7, 0, -10, conv=False)

    def test_mode(self):
        widget = self.create()
        self.checkEnumParam(widget, 'mode', 'determinate', 'indeterminate')

    def test_phase(self):
        # XXX
        pass

    def test_value(self):
        widget = self.create()
        self.checkFloatParam(widget, 'value', 150.2, 77.7, 0, -10,
                             conv=False)
@unittest.skipIf(sys.platform == 'darwin',
                 'ttk.Scrollbar is special on MacOSX')
@add_standard_options(StandardTtkOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for the ttk.Scrollbar widget (standard options only)."""
    OPTIONS = (
        'class', 'command', 'cursor', 'orient', 'style', 'takefocus',
    )
    default_orient = 'vertical'

    def create(self, **kwargs):
        return ttk.Scrollbar(self.root, **kwargs)
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class NotebookTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for the ttk.Notebook widget (tab management and traversal)."""
    OPTIONS = (
        'class', 'cursor', 'height', 'padding', 'style', 'takefocus', 'width',
    )

    def setUp(self):
        super().setUp()
        self.nb = self.create(padding=0)
        self.child1 = ttk.Label(self.root)
        self.child2 = ttk.Label(self.root)
        self.nb.add(self.child1, text='a')
        self.nb.add(self.child2, text='b')

    def create(self, **kwargs):
        return ttk.Notebook(self.root, **kwargs)

    def test_tab_identifiers(self):
        self.nb.forget(0)
        self.nb.hide(self.child2)
        self.assertRaises(tkinter.TclError, self.nb.tab, self.child1)
        self.assertEqual(self.nb.index('end'), 1)
        self.nb.add(self.child2)
        self.assertEqual(self.nb.index('end'), 1)
        self.nb.select(self.child2)

        self.assertTrue(self.nb.tab('current'))
        self.nb.add(self.child1, text='a')

        self.nb.pack()
        self.nb.wait_visibility()
        # the "@x,y" positional tab identifier; offsets differ on macOS
        if sys.platform == 'darwin':
            tb_idx = "@20,5"
        else:
            tb_idx = "@5,5"
        self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))

        # scan across the tab bar until the tab labelled 'a' is hit
        for i in range(5, 100, 5):
            try:
                if self.nb.tab('@%d, 5' % i, text=None) == 'a':
                    break
            except tkinter.TclError:
                pass
        else:
            self.fail("Tab with text 'a' not found")

    def test_add_and_hidden(self):
        self.assertRaises(tkinter.TclError, self.nb.hide, -1)
        self.assertRaises(tkinter.TclError, self.nb.hide, 'hi')
        self.assertRaises(tkinter.TclError, self.nb.hide, None)
        self.assertRaises(tkinter.TclError, self.nb.add, None)
        self.assertRaises(tkinter.TclError, self.nb.add, ttk.Label(self.root),
            unknown='option')

        tabs = self.nb.tabs()
        self.nb.hide(self.child1)
        self.nb.add(self.child1)
        self.assertEqual(self.nb.tabs(), tabs)

        child = ttk.Label(self.root)
        self.nb.add(child, text='c')
        tabs = self.nb.tabs()

        curr = self.nb.index('current')
        # verify that the tab gets readded at its previous position
        child2_index = self.nb.index(self.child2)
        self.nb.hide(self.child2)
        self.nb.add(self.child2)
        self.assertEqual(self.nb.tabs(), tabs)
        self.assertEqual(self.nb.index(self.child2), child2_index)
        self.assertEqual(str(self.child2), self.nb.tabs()[child2_index])
        # but the tab next to it (not hidden) is the one selected now
        self.assertEqual(self.nb.index('current'), curr + 1)

    def test_forget(self):
        self.assertRaises(tkinter.TclError, self.nb.forget, -1)
        self.assertRaises(tkinter.TclError, self.nb.forget, 'hi')
        self.assertRaises(tkinter.TclError, self.nb.forget, None)

        tabs = self.nb.tabs()
        child1_index = self.nb.index(self.child1)
        self.nb.forget(self.child1)
        self.assertNotIn(str(self.child1), self.nb.tabs())
        self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))

        # unlike hide/add, forget/add appends at the end, not the old spot
        self.nb.add(self.child1)
        self.assertEqual(self.nb.index(self.child1), 1)
        self.assertNotEqual(child1_index, self.nb.index(self.child1))

    def test_index(self):
        self.assertRaises(tkinter.TclError, self.nb.index, -1)
        self.assertRaises(tkinter.TclError, self.nb.index, None)

        self.assertIsInstance(self.nb.index('end'), int)
        self.assertEqual(self.nb.index(self.child1), 0)
        self.assertEqual(self.nb.index(self.child2), 1)
        self.assertEqual(self.nb.index('end'), 2)

    def test_insert(self):
        # moving tabs
        tabs = self.nb.tabs()
        self.nb.insert(1, tabs[0])
        self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
        self.nb.insert(self.child1, self.child2)
        self.assertEqual(self.nb.tabs(), tabs)
        self.nb.insert('end', self.child1)
        self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
        self.nb.insert('end', 0)
        self.assertEqual(self.nb.tabs(), tabs)
        # bad moves
        self.assertRaises(tkinter.TclError, self.nb.insert, 2, tabs[0])
        self.assertRaises(tkinter.TclError, self.nb.insert, -1, tabs[0])
        # new tab
        child3 = ttk.Label(self.root)
        self.nb.insert(1, child3)
        self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
        self.nb.forget(child3)
        self.assertEqual(self.nb.tabs(), tabs)
        self.nb.insert(self.child1, child3)
        self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
        self.nb.forget(child3)
        self.assertRaises(tkinter.TclError, self.nb.insert, 2, child3)
        self.assertRaises(tkinter.TclError, self.nb.insert, -1, child3)
        # bad inserts
        self.assertRaises(tkinter.TclError, self.nb.insert, 'end', None)
        self.assertRaises(tkinter.TclError, self.nb.insert, None, 0)
        self.assertRaises(tkinter.TclError, self.nb.insert, None, None)

    def test_select(self):
        self.nb.pack()
        self.nb.wait_visibility()

        success = []
        tab_changed = []

        # the unselected tab gets unmapped; selecting fires the virtual event
        self.child1.bind('<Unmap>', lambda evt: success.append(True))
        self.nb.bind('<<NotebookTabChanged>>',
            lambda evt: tab_changed.append(True))

        self.assertEqual(self.nb.select(), str(self.child1))
        self.nb.select(self.child2)
        self.assertTrue(success)
        self.assertEqual(self.nb.select(), str(self.child2))

        self.nb.update()
        self.assertTrue(tab_changed)

    def test_tab(self):
        self.assertRaises(tkinter.TclError, self.nb.tab, -1)
        self.assertRaises(tkinter.TclError, self.nb.tab, 'notab')
        self.assertRaises(tkinter.TclError, self.nb.tab, None)

        self.assertIsInstance(self.nb.tab(self.child1), dict)
        self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
        # newer form for querying a single option
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
        self.nb.tab(self.child1, text='abc')
        self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')

    def test_tabs(self):
        self.assertEqual(len(self.nb.tabs()), 2)

        self.nb.forget(self.child1)
        self.nb.forget(self.child2)

        self.assertEqual(self.nb.tabs(), ())

    def test_traversal(self):
        self.nb.pack()
        self.nb.wait_visibility()

        self.nb.select(0)

        simulate_mouse_click(self.nb, 5, 5)
        self.nb.focus_force()
        self.nb.event_generate('<Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))
        self.nb.focus_force()
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child1))
        self.nb.focus_force()
        # Shift-Control-Tab wraps around to the last tab
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))

        # mnemonic traversal (Alt/Option + underlined letter) needs
        # enable_traversal() to be active
        self.nb.tab(self.child1, text='a', underline=0)
        self.nb.enable_traversal()
        self.nb.focus_force()
        simulate_mouse_click(self.nb, 5, 5)
        if sys.platform == 'darwin':
            self.nb.event_generate('<Option-a>')
        else:
            self.nb.event_generate('<Alt-a>')
        self.assertEqual(self.nb.select(), str(self.child1))
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class SpinboxTest(EntryTest, unittest.TestCase):
    """Tests for the ttk.Spinbox widget (arrows, range, format, values)."""
    OPTIONS = (
        'background', 'class', 'command', 'cursor', 'exportselection',
        'font', 'foreground', 'format', 'from', 'increment',
        'invalidcommand', 'justify', 'show', 'state', 'style',
        'takefocus', 'textvariable', 'to', 'validate', 'validatecommand',
        'values', 'width', 'wrap', 'xscrollcommand',
    )

    def setUp(self):
        super().setUp()
        self.spin = self.create()
        self.spin.pack()

    def create(self, **kwargs):
        return ttk.Spinbox(self.root, **kwargs)

    def _click_increment_arrow(self):
        # click just inside the upper half of the right-hand arrow area
        width = self.spin.winfo_width()
        height = self.spin.winfo_height()
        x = width - 5
        y = height//2 - 5
        self.spin.event_generate('<ButtonPress-1>', x=x, y=y)
        self.spin.event_generate('<ButtonRelease-1>', x=x, y=y)
        self.spin.update_idletasks()

    def _click_decrement_arrow(self):
        # click just inside the lower half of the right-hand arrow area
        width = self.spin.winfo_width()
        height = self.spin.winfo_height()
        x = width - 5
        y = height//2 + 4
        self.spin.event_generate('<ButtonPress-1>', x=x, y=y)
        self.spin.event_generate('<ButtonRelease-1>', x=x, y=y)
        self.spin.update_idletasks()

    def test_command(self):
        success = []

        self.spin['command'] = lambda: success.append(True)
        self.spin.update()
        self._click_increment_arrow()
        self.spin.update()
        self.assertTrue(success)

        self._click_decrement_arrow()
        self.assertEqual(len(success), 2)

        # testing postcommand removal
        self.spin['command'] = ''
        self.spin.update_idletasks()
        self._click_increment_arrow()
        self._click_decrement_arrow()
        self.spin.update()
        self.assertEqual(len(success), 2)

    def test_to(self):
        self.spin['from'] = 0
        self.spin['to'] = 5
        self.spin.set(4)
        self.spin.update()
        self._click_increment_arrow()  # 5

        self.assertEqual(self.spin.get(), '5')

        self._click_increment_arrow()  # 5
        self.assertEqual(self.spin.get(), '5')

    def test_from(self):
        self.spin['from'] = 1
        self.spin['to'] = 10
        self.spin.set(2)
        self.spin.update()
        self._click_decrement_arrow()  # 1
        self.assertEqual(self.spin.get(), '1')
        self._click_decrement_arrow()  # 1
        self.assertEqual(self.spin.get(), '1')

    def test_increment(self):
        self.spin['from'] = 0
        self.spin['to'] = 10
        self.spin['increment'] = 4
        self.spin.set(1)
        self.spin.update()

        self._click_increment_arrow()  # 5
        self.assertEqual(self.spin.get(), '5')

        self.spin['increment'] = 2
        self.spin.update()
        self._click_decrement_arrow()  # 3
        self.assertEqual(self.spin.get(), '3')

    def test_format(self):
        self.spin.set(1)
        self.spin['format'] = '%10.3f'
        self.spin.update()
        self._click_increment_arrow()

        value = self.spin.get()

        self.assertEqual(len(value), 10)
        self.assertEqual(value.index('.'), 6)

        self.spin['format'] = ''
        self.spin.update()
        self._click_increment_arrow()

        value = self.spin.get()

        # assertNotIn instead of assertTrue('.' not in value): gives a
        # useful failure message showing the offending value
        self.assertNotIn('.', value)
        self.assertEqual(len(value), 1)

    def test_wrap(self):
        self.spin['to'] = 10
        self.spin['from'] = 1
        self.spin.set(1)
        self.spin['wrap'] = True
        self.spin.update()

        self._click_decrement_arrow()
        self.assertEqual(self.spin.get(), '10')

        self._click_increment_arrow()
        self.assertEqual(self.spin.get(), '1')

        self.spin['wrap'] = False
        self.spin.update()

        self._click_decrement_arrow()
        self.assertEqual(self.spin.get(), '1')

    def test_values(self):
        self.assertEqual(self.spin['values'],
                         () if tcl_version < (8, 5) else '')
        self.checkParam(self.spin, 'values', 'mon tue wed thur',
                        expected=('mon', 'tue', 'wed', 'thur'))
        self.checkParam(self.spin, 'values', ('mon', 'tue', 'wed', 'thur'))
        self.checkParam(self.spin, 'values', (42, 3.14, '', 'any string'))
        self.checkParam(self.spin, 'values', '')

        self.spin['values'] = ['a', 1, 'c']

        # test incrementing / decrementing values
        self.spin.set('a')
        self.spin.update()
        self._click_increment_arrow()
        self.assertEqual(self.spin.get(), '1')

        self._click_decrement_arrow()
        self.assertEqual(self.spin.get(), 'a')

        # testing values with empty string set through configure
        self.spin.configure(values=[1, '', 2])
        self.assertEqual(self.spin['values'],
                         ('1', '', '2') if self.wantobjects else
                         '1 {} 2')

        # testing values with spaces
        self.spin['values'] = ['a b', 'a\tb', 'a\nb']
        self.assertEqual(self.spin['values'],
                         ('a b', 'a\tb', 'a\nb') if self.wantobjects else
                         '{a b} {a\tb} {a\nb}')

        # testing values with special characters
        self.spin['values'] = [r'a\tb', '"a"', '} {']
        self.assertEqual(self.spin['values'],
                         (r'a\tb', '"a"', '} {') if self.wantobjects else
                         r'a\\tb {"a"} \}\ \{')

        # testing creating spinbox with empty string in values
        spin2 = ttk.Spinbox(self.root, values=[1, 2, ''])
        self.assertEqual(spin2['values'],
                         ('1', '2', '') if self.wantobjects else '1 2 {}')
        spin2.destroy()
@add_standard_options(StandardTtkOptionsTests)
class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'columns', 'cursor', 'displaycolumns',
'height', 'padding', 'selectmode', 'show',
'style', 'takefocus', 'xscrollcommand', 'yscrollcommand',
)
    def setUp(self):
        super().setUp()
        self.tv = self.create(padding=0)

    def create(self, **kwargs):
        return ttk.Treeview(self.root, **kwargs)

    def test_columns(self):
        widget = self.create()
        self.checkParam(widget, 'columns', 'a b c',
                        expected=('a', 'b', 'c'))
        self.checkParam(widget, 'columns', ('a', 'b', 'c'))
        self.checkParam(widget, 'columns', '')

    def test_displaycolumns(self):
        widget = self.create()
        widget['columns'] = ('a', 'b', 'c')
        self.checkParam(widget, 'displaycolumns', 'b a c',
                        expected=('b', 'a', 'c'))
        self.checkParam(widget, 'displaycolumns', ('b', 'a', 'c'))
        self.checkParam(widget, 'displaycolumns', '#all',
                        expected=('#all',))
        # columns may also be referenced by integer index
        self.checkParam(widget, 'displaycolumns', (2, 1, 0))
        self.checkInvalidParam(widget, 'displaycolumns', ('a', 'b', 'd'),
                               errmsg='Invalid column index d')
        self.checkInvalidParam(widget, 'displaycolumns', (1, 2, 3),
                               errmsg='Column index 3 out of bounds')
        self.checkInvalidParam(widget, 'displaycolumns', (1, -2),
                               errmsg='Column index -2 out of bounds')

    def test_height(self):
        # treeview 'height' counts rows, not pixels, so no pixel conversion
        widget = self.create()
        self.checkPixelsParam(widget, 'height', 100, -100, 0, '3c', conv=False)
        self.checkPixelsParam(widget, 'height', 101.2, 102.6, conv=noconv)

    def test_selectmode(self):
        widget = self.create()
        self.checkEnumParam(widget, 'selectmode',
                            'none', 'browse', 'extended')

    def test_show(self):
        widget = self.create()
        self.checkParam(widget, 'show', 'tree headings',
                        expected=('tree', 'headings'))
        self.checkParam(widget, 'show', ('tree', 'headings'))
        self.checkParam(widget, 'show', ('headings', 'tree'))
        self.checkParam(widget, 'show', 'tree', expected=('tree',))
        self.checkParam(widget, 'show', 'headings', expected=('headings',))
    def test_bbox(self):
        self.tv.pack()
        # the root item has no bounding box
        self.assertEqual(self.tv.bbox(''), '')
        self.tv.wait_visibility()
        self.tv.update()

        item_id = self.tv.insert('', 'end')
        children = self.tv.get_children()
        self.assertTrue(children)

        bbox = self.tv.bbox(children[0])
        self.assertIsBoundingBox(bbox)

        # compare width in bboxes
        self.tv['columns'] = ['test']
        self.tv.column('test', width=50)
        bbox_column0 = self.tv.bbox(children[0], 0)
        root_width = self.tv.column('#0', width=None)
        if not self.wantobjects:
            root_width = int(root_width)
        self.assertEqual(bbox_column0[0], bbox[0] + root_width)

        # verify that bbox of a closed item is the empty string
        child1 = self.tv.insert(item_id, 'end')
        self.assertEqual(self.tv.bbox(child1), '')

    def test_children(self):
        # no children yet, should get an empty tuple
        self.assertEqual(self.tv.get_children(), ())

        item_id = self.tv.insert('', 'end')
        self.assertIsInstance(self.tv.get_children(), tuple)
        self.assertEqual(self.tv.get_children()[0], item_id)

        # add item_id and child3 as children of child2
        child2 = self.tv.insert('', 'end')
        child3 = self.tv.insert('', 'end')
        self.tv.set_children(child2, item_id, child3)
        self.assertEqual(self.tv.get_children(child2), (item_id, child3))

        # child3 has child2 as parent, thus trying to set child2 as a children
        # of child3 should result in an error
        self.assertRaises(tkinter.TclError,
                          self.tv.set_children, child3, child2)

        # remove child2 children
        self.tv.set_children(child2)
        self.assertEqual(self.tv.get_children(child2), ())

        # remove root's children
        self.tv.set_children('')
        self.assertEqual(self.tv.get_children(), ())
    def test_column(self):
        # return a dict with all options/values
        self.assertIsInstance(self.tv.column('#0'), dict)
        # return a single value of the given option
        if self.wantobjects:
            self.assertIsInstance(self.tv.column('#0', width=None), int)
        # set a new value for an option
        self.tv.column('#0', width=10)
        # testing new way to get option value
        self.assertEqual(self.tv.column('#0', 'width'),
                         10 if self.wantobjects else '10')
        self.assertEqual(self.tv.column('#0', width=None),
                         10 if self.wantobjects else '10')
        # check read-only option
        self.assertRaises(tkinter.TclError, self.tv.column, '#0', id='X')

        self.assertRaises(tkinter.TclError, self.tv.column, 'invalid')
        invalid_kws = [
            {'unknown_option': 'some value'}, {'stretch': 'wrong'},
            {'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
        ]
        for kw in invalid_kws:
            self.assertRaises(tkinter.TclError, self.tv.column, '#0',
                              **kw)

    def test_delete(self):
        # '#0' (the tree column/root) cannot be deleted
        self.assertRaises(tkinter.TclError, self.tv.delete, '#0')

        item_id = self.tv.insert('', 'end')
        item2 = self.tv.insert(item_id, 'end')
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))

        # deleting an item removes its descendants too
        self.tv.delete(item_id)
        self.assertFalse(self.tv.get_children())

        # reattach should fail
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, item_id, '', 'end')

        # test multiple item delete
        item1 = self.tv.insert('', 'end')
        item2 = self.tv.insert('', 'end')
        self.assertEqual(self.tv.get_children(), (item1, item2))

        self.tv.delete(item1, item2)
        self.assertFalse(self.tv.get_children())
    def test_detach_reattach(self):
        item_id = self.tv.insert('', 'end')
        item2 = self.tv.insert(item_id, 'end')

        # calling detach without items is valid, although it does nothing
        prev = self.tv.get_children()
        self.tv.detach()  # this should do nothing
        self.assertEqual(prev, self.tv.get_children())

        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))

        # detach item with children
        self.tv.detach(item_id)
        self.assertFalse(self.tv.get_children())

        # reattach item with children
        self.tv.reattach(item_id, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))

        # move a children to the root
        self.tv.move(item2, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, item2))
        self.assertEqual(self.tv.get_children(item_id), ())

        # bad values
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, 'nonexistent', '', 'end')
        self.assertRaises(tkinter.TclError,
                          self.tv.detach, 'nonexistent')
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, item2, 'otherparent', 'end')
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, item2, '', 'invalid')

        # multiple detach
        self.tv.detach(item_id, item2)
        self.assertEqual(self.tv.get_children(), ())
        self.assertEqual(self.tv.get_children(item_id), ())

    def test_exists(self):
        self.assertEqual(self.tv.exists('something'), False)
        # '' is the (always-existing) root item
        self.assertEqual(self.tv.exists(''), True)
        self.assertEqual(self.tv.exists({}), False)

        # the following will make a tk.call equivalent to
        # tk.call(treeview, "exists") which should result in an error
        # in the tcl interpreter since tk requires an item.
        self.assertRaises(tkinter.TclError, self.tv.exists, None)
    def test_focus(self):
        # nothing is focused right now
        self.assertEqual(self.tv.focus(), '')

        item1 = self.tv.insert('', 'end')
        self.tv.focus(item1)
        self.assertEqual(self.tv.focus(), item1)

        # deleting the focused item resets the focus
        self.tv.delete(item1)
        self.assertEqual(self.tv.focus(), '')

        # try focusing inexistent item
        self.assertRaises(tkinter.TclError, self.tv.focus, 'hi')

    def test_heading(self):
        # check a dict is returned
        self.assertIsInstance(self.tv.heading('#0'), dict)

        # check a value is returned
        self.tv.heading('#0', text='hi')
        self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
        self.assertEqual(self.tv.heading('#0', text=None), 'hi')

        # invalid option
        self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
                          background=None)
        # invalid value
        self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
                          anchor=1)

    def test_heading_callback(self):
        def simulate_heading_click(x, y):
            simulate_mouse_click(self.tv, x, y)
            self.tv.update()

        success = []  # no success for now

        self.tv.pack()
        self.tv.wait_visibility()
        self.tv.heading('#0', command=lambda: success.append(True))
        self.tv.column('#0', width=100)
        self.tv.update()

        # assuming that the coords (5, 5) fall into heading #0
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                      "invoked.")

        success = []
        # re-registering the same command string must not leak a new
        # Tcl command entry
        commands = self.tv.master._tclCommands
        self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
        self.assertEqual(commands, self.tv.master._tclCommands)
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                      "invoked.")

        # XXX The following raises an error in a tcl interpreter, but not in
        # Python
        #self.tv.heading('#0', command='I dont exist')
        #simulate_heading_click(5, 5)
    def test_index(self):
        # item 'what' doesn't exist
        self.assertRaises(tkinter.TclError, self.tv.index, 'what')

        self.assertEqual(self.tv.index(''), 0)

        item1 = self.tv.insert('', 'end')
        item2 = self.tv.insert('', 'end')
        c1 = self.tv.insert(item1, 'end')
        c2 = self.tv.insert(item1, 'end')
        self.assertEqual(self.tv.index(item1), 0)
        self.assertEqual(self.tv.index(c1), 0)
        self.assertEqual(self.tv.index(c2), 1)
        self.assertEqual(self.tv.index(item2), 1)

        self.tv.move(item2, '', 0)
        self.assertEqual(self.tv.index(item2), 0)
        self.assertEqual(self.tv.index(item1), 1)

        # check that index still works even after its parent and siblings
        # have been detached
        self.tv.detach(item1)
        self.assertEqual(self.tv.index(c2), 1)
        self.tv.detach(c1)
        self.assertEqual(self.tv.index(c2), 0)

        # but it fails after item has been deleted
        self.tv.delete(item1)
        self.assertRaises(tkinter.TclError, self.tv.index, c2)

    def test_insert_item(self):
        # parent 'none' doesn't exist
        self.assertRaises(tkinter.TclError, self.tv.insert, 'none', 'end')

        # open values
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          open='')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          open='please')
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))

        # invalid index
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'middle')

        # trying to duplicate item id is invalid
        itemid = self.tv.insert('', 'end', 'first-item')
        self.assertEqual(itemid, 'first-item')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          'first-item')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          MockTclObj('first-item'))

        # unicode values
        value = '\xe1ba'
        item = self.tv.insert('', 'end', values=(value, ))
        self.assertEqual(self.tv.item(item, 'values'),
                         (value,) if self.wantobjects else value)
        self.assertEqual(self.tv.item(item, values=None),
                         (value,) if self.wantobjects else value)

        # round-tripping the values through splitlist keeps them intact
        self.tv.item(item, values=self.root.splitlist(self.tv.item(item, values=None)))
        self.assertEqual(self.tv.item(item, values=None),
                         (value,) if self.wantobjects else value)

        self.assertIsInstance(self.tv.item(item), dict)

        # erase item values
        self.tv.item(item, values='')
        self.assertFalse(self.tv.item(item, values=None))

        # item tags
        item = self.tv.insert('', 'end', tags=[1, 2, value])
        self.assertEqual(self.tv.item(item, tags=None),
                         ('1', '2', value) if self.wantobjects else
                         '1 2 %s' % value)
        self.tv.item(item, tags=[])
        self.assertFalse(self.tv.item(item, tags=None))
        self.tv.item(item, tags=(1, 2))
        self.assertEqual(self.tv.item(item, tags=None),
                         ('1', '2') if self.wantobjects else '1 2')

        # values with spaces
        item = self.tv.insert('', 'end', values=('a b c',
                              '%s %s' % (value, value)))
        self.assertEqual(self.tv.item(item, values=None),
                         ('a b c', '%s %s' % (value, value)) if self.wantobjects else
                         '{a b c} {%s %s}' % (value, value))

        # text
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text="Label here"), text=None),
            "Label here")
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text=value), text=None),
            value)

        # test for values which are not None
        itemid = self.tv.insert('', 'end', 0)
        self.assertEqual(itemid, '0')
        itemid = self.tv.insert('', 'end', 0.0)
        self.assertEqual(itemid, '0.0')
        # this is because False resolves to 0 and element with 0 iid is already present
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end', False)
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end', '')
def test_selection(self):
    """Exercise selection_set/add/remove/toggle, including legacy and
    odd-iid forms."""
    def check(*expected):
        # Current selection must exactly match the expected iids.
        self.assertEqual(self.tv.selection(), expected)

    self.assertRaises(TypeError, self.tv.selection, 'spam')
    # operating on a nonexistent item is a Tcl error
    for method in (self.tv.selection_set, self.tv.selection_add,
                   self.tv.selection_remove, self.tv.selection_toggle):
        self.assertRaises(tkinter.TclError, method, 'none')

    top1 = self.tv.insert('', 'end')
    top2 = self.tv.insert('', 'end')
    child1 = self.tv.insert(top1, 'end')
    child2 = self.tv.insert(top1, 'end')
    child3 = self.tv.insert(top1, 'end')
    check()
    self.tv.selection_set(child1, top2)
    check(child1, top2)
    self.tv.selection_set(child2)
    check(child2)
    self.tv.selection_add(child1, top2)
    check(child1, child2, top2)
    self.tv.selection_add(top1)
    check(top1, child1, child2, top2)
    self.tv.selection_add()
    check(top1, child1, child2, top2)
    self.tv.selection_remove(top1, child3)
    check(child1, child2, top2)
    self.tv.selection_remove(child2)
    check(child1, top2)
    self.tv.selection_remove()
    check(child1, top2)
    self.tv.selection_toggle(child1, child3)
    check(child3, top2)
    self.tv.selection_toggle(top2)
    check(child3)
    self.tv.selection_toggle()
    check(child3)

    # iids needing Tcl quoting or non-ASCII handling still round-trip
    for iid in ('with spaces', '{brace', 'unicode\u20ac'):
        self.tv.insert('', 'end', id=iid)
        self.tv.selection_set(iid)
        check(iid)
    self.tv.insert('', 'end', id=b'bytes\xe2\x82\xac')
    self.tv.selection_set(b'bytes\xe2\x82\xac')
    check('bytes\xe2\x82\xac')
    self.tv.selection_set()
    check()

    # Old interface: a single tuple argument instead of varargs
    self.tv.selection_set((child1, top2))
    check(child1, top2)
    self.tv.selection_add((child1, top1))
    check(top1, child1, top2)
    self.tv.selection_remove((top1, child3))
    check(child1, top2)
    self.tv.selection_toggle((child1, child3))
    check(child3, top2)
def test_set(self):
    """Verify Treeview.set for reading and writing cell values."""
    self.tv['columns'] = ['A', 'B']
    row = self.tv.insert('', 'end', values=['a', 'b'])
    self.assertEqual(self.tv.set(row), {'A': 'a', 'B': 'b'})

    self.tv.set(row, 'B', 'a')
    expected = ('a', 'a') if self.wantobjects else 'a a'
    self.assertEqual(self.tv.item(row, values=None), expected)

    # shrinking the column list hides, but keeps, the old values
    self.tv['columns'] = ['B']
    self.assertEqual(self.tv.set(row), {'B': 'a'})

    self.tv.set(row, 'B', 'b')
    self.assertEqual(self.tv.set(row, column='B'), 'b')
    expected = ('b', 'a') if self.wantobjects else 'b a'
    self.assertEqual(self.tv.item(row, values=None), expected)

    # non-string values come back as objects or as strings
    self.tv.set(row, 'B', 123)
    self.assertEqual(self.tv.set(row, 'B'),
                     123 if self.wantobjects else '123')
    expected = (123, 'a') if self.wantobjects else '123 a'
    self.assertEqual(self.tv.item(row, values=None), expected)
    self.assertEqual(self.tv.set(row),
                     {'B': 123} if self.wantobjects else {'B': '123'})

    # inexistent column
    self.assertRaises(tkinter.TclError, self.tv.set, row, 'A')
    self.assertRaises(tkinter.TclError, self.tv.set, row, 'A', 'b')
    # inexistent item
    self.assertRaises(tkinter.TclError, self.tv.set, 'notme')
def test_tag_bind(self):
    """Bind press/release callbacks to a tag shared by two items and
    check that simulated clicks on each item fire both, in order."""
    events = []
    item1 = self.tv.insert('', 'end', tags=['call'])
    item2 = self.tv.insert('', 'end', tags=['call'])
    self.tv.tag_bind('call', '<ButtonPress-1>',
                     lambda evt: events.append(1))
    self.tv.tag_bind('call', '<ButtonRelease-1>',
                     lambda evt: events.append(2))
    # The widget must be mapped before identify_row/clicks work.
    self.tv.pack()
    self.tv.wait_visibility()
    self.tv.update()

    pos_y = set()
    found = set()
    # Probe y coordinates until both items' rows have been located.
    for i in range(0, 100, 10):
        if len(found) == 2:  # item1 and item2 already found
            break
        item_id = self.tv.identify_row(i)
        if item_id and item_id not in found:
            pos_y.add(i)
            found.add(item_id)
    self.assertEqual(len(pos_y), 2)  # item1 and item2 y pos
    for y in pos_y:
        simulate_mouse_click(self.tv, 0, y)

    # by now there should be 4 things in the events list, since each
    # item had a bind for two events that were simulated above
    self.assertEqual(len(events), 4)
    # Each click must have produced press (1) then release (2).
    for evt in zip(events[::2], events[1::2]):
        self.assertEqual(evt, (1, 2))
def test_tag_configure(self):
    """Smoke-test tag_configure parameter handling (no visual check)."""
    self.assertRaises(TypeError, self.tv.tag_configure)
    # unknown option name is rejected by Tcl
    self.assertRaises(tkinter.TclError, self.tv.tag_configure,
                      'test', sky='blue')
    self.tv.tag_configure('test', foreground='blue')
    # both query spellings return the configured value
    for result in (self.tv.tag_configure('test', 'foreground'),
                   self.tv.tag_configure('test', foreground=None)):
        self.assertEqual(str(result), 'blue')
    self.assertIsInstance(self.tv.tag_configure('test'), dict)
def test_tag_has(self):
    """Check tag_has in both (tag, item) membership and tag-query forms."""
    first = self.tv.insert('', 'end', text='Item 1', tags=['tag1'])
    second = self.tv.insert('', 'end', text='Item 2', tags=['tag2'])
    self.assertRaises(TypeError, self.tv.tag_has)
    self.assertRaises(TclError, self.tv.tag_has, 'tag1', 'non-existing')
    # two-argument form: does <item> carry <tag>?
    for tag, item, expected in (
            ('tag1', first, True), ('tag1', second, False),
            ('tag2', first, False), ('tag2', second, True),
            ('tag3', first, False), ('tag3', second, False)):
        if expected:
            self.assertTrue(self.tv.tag_has(tag, item))
        else:
            self.assertFalse(self.tv.tag_has(tag, item))
    # one-argument form: all items carrying the tag
    self.assertEqual(self.tv.tag_has('tag1'), (first,))
    self.assertEqual(self.tv.tag_has('tag2'), (second,))
    self.assertEqual(self.tv.tag_has('tag3'), ())
@add_standard_options(StandardTtkOptionsTests)
class SeparatorTest(AbstractWidgetTest, unittest.TestCase):
    """Option tests for ttk.Separator."""
    # Options exercised by the generated standard-option tests.
    OPTIONS = (
        'class', 'cursor', 'orient', 'style', 'takefocus',
        # 'state'?
    )
    default_orient = 'horizontal'

    def create(self, **kwargs):
        """Return a ttk.Separator instance for the option tests."""
        return ttk.Separator(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class SizegripTest(AbstractWidgetTest, unittest.TestCase):
    """Option tests for ttk.Sizegrip."""
    # Options exercised by the generated standard-option tests.
    OPTIONS = (
        'class', 'cursor', 'style', 'takefocus',
        # 'state'?
    )

    def create(self, **kwargs):
        """Return a ttk.Sizegrip instance for the option tests."""
        return ttk.Sizegrip(self.root, **kwargs)
# All GUI widget test cases defined in this module, collected for the
# test runner.
tests_gui = (
        ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest,
        FrameTest, LabelFrameTest, LabelTest, MenubuttonTest,
        NotebookTest, PanedWindowTest, ProgressbarTest,
        RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest,
        SizegripTest, SpinboxTest, TreeviewTest, WidgetTest,
        )

if __name__ == "__main__":
    unittest.main()
| |
"""
The help command. The basic idea is that help texts for commands
are best written by those that write the commands - the admins. So
command-help is all auto-loaded and searched from the current command
set. The normal, database-tied help system is used for collaborative
creation of other help topics such as RP help or game-world aids.
"""
from django.conf import settings
from collections import defaultdict
from evennia.utils.utils import fill, dedent
from evennia.commands.command import Command
from evennia.help.models import HelpEntry
from evennia.utils import create
from evennia.utils.utils import string_suggestions
from evennia.commands.default.muxcommand import MuxCommand
# limit symbol import for API
__all__ = ("CmdHelp", "CmdSetHelp")
# Width (in characters) used when formatting help output.
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
# Horizontal rule, colored with {C...{n markup, used between help sections.
_SEP = "{C" + "-" * _DEFAULT_WIDTH + "{n"
def format_help_entry(title, help_text, aliases=None, suggested=None):
    """
    This visually formats the help entry.

    Args:
        title (str): Heading for the entry (may be empty).
        help_text (str): Body text of the entry.
        aliases (iterable, optional): Alternative names for the entry.
        suggested (iterable, optional): Related topic names to suggest.

    Returns:
        str: The formatted help entry, wrapped in separator rules.
    """
    string = _SEP + "\n"
    if title:
        string += "{CHelp for {w%s{n" % title
    if aliases:
        string += " {C(aliases: %s{C){n" % ("{C,{n ".join("{w%s{n" % ali for ali in aliases))
    if help_text:
        string += "\n%s" % dedent(help_text.rstrip())
    if suggested:
        string += "\n\n{CSuggested:{n "
        string += "%s" % fill("{C,{n ".join("{w%s{n" % sug for sug in suggested))
    # BUGFIX: str.strip() returns a new string; the original called it and
    # discarded the result, so the intended trim never happened.
    string = string.strip()
    string += "\n" + _SEP
    return string
def format_help_list(hdict_cmds, hdict_db):
    """
    Output a category-ordered list. The input are the
    pre-loaded help files for commands and database-helpfiles
    respectively.

    Args:
        hdict_cmds (dict): Mapping {category: [cmd_key, ...]} for commands.
        hdict_db (dict): Mapping {category: [topic, ...]} for db help entries.

    Returns:
        str: Formatted, category-grouped listing of all help entries.
    """
    string = ""
    if hdict_cmds and any(hdict_cmds.values()):
        string += "\n" + _SEP + "\n   {CCommand help entries{n\n" + _SEP
        # one line-wrapped, sorted block of command keys per category
        for category in sorted(hdict_cmds.keys()):
            string += "\n  {w%s{n:\n" % (str(category).title())
            string += "{G" + fill(", ".join(sorted(hdict_cmds[category]))) + "{n"
    if hdict_db and any(hdict_db.values()):
        string += "\n\n" + _SEP + "\n\r  {COther help entries{n\n" + _SEP
        for category in sorted(hdict_db.keys()):
            string += "\n\r  {w%s{n:\n" % (str(category).title())
            string += "{G" + fill(", ".join(sorted([str(topic) for topic in hdict_db[category]]))) + "{n"
    return string
class CmdHelp(Command):
    """
    view help or a list of topics

    Usage:
      help <topic or command>
      help list
      help all

    This will search for help on commands and other
    topics related to the game.
    """
    key = "help"
    locks = "cmd:all()"

    # this is a special cmdhandler flag that makes the cmdhandler also pack
    # the current cmdset with the call to self.func().
    return_cmdset = True

    def parse(self):
        """
        input is a string containing the command or topic to match.
        """
        self.original_args = self.args.strip()
        self.args = self.args.strip().lower()

    def func(self):
        """
        Run the dynamic help entry creator.
        """
        query, cmdset = self.args, self.cmdset
        caller = self.caller

        suggestion_cutoff = 0.6
        suggestion_maxnum = 5

        if not query:
            query = "all"

        # removing doublets in cmdset, caused by cmdhandler
        # having to allow doublet commands to manage exits etc.
        cmdset.make_unique(caller)

        # retrieve all available commands and database topics
        all_cmds = [cmd for cmd in cmdset if cmd.auto_help and cmd.access(caller)]
        all_topics = [topic for topic in HelpEntry.objects.all() if topic.access(caller, 'view', default=True)]
        all_categories = list(set([cmd.help_category.lower() for cmd in all_cmds] + [topic.help_category.lower() for topic in all_topics]))

        if query in ("list", "all"):
            # we want to list all available help entries, grouped by category
            hdict_cmd = defaultdict(list)
            hdict_topic = defaultdict(list)
            # create the dictionaries {category:[topic, topic ...]} required
            # by format_help_list. Plain loops instead of the previous
            # side-effect-only list comprehensions.
            for cmd in all_cmds:
                hdict_cmd[cmd.help_category].append(cmd.key)
            for topic in all_topics:
                hdict_topic[topic.help_category].append(topic.key)
            # report back
            self.msg(format_help_list(hdict_cmd, hdict_topic))
            return

        # Try to access a particular command

        # build vocabulary of suggestions and rate them by string similarity.
        vocabulary = [cmd.key for cmd in all_cmds if cmd] + [topic.key for topic in all_topics] + all_categories
        for cmd in all_cmds:
            vocabulary.extend(cmd.aliases)
        suggestions = [sugg for sugg in string_suggestions(query, set(vocabulary), cutoff=suggestion_cutoff, maxnum=suggestion_maxnum)
                       if sugg != query]
        if not suggestions:
            # fall back to simple prefix matching
            suggestions = [sugg for sugg in vocabulary if sugg != query and sugg.startswith(query)]

        # try an exact command auto-help match
        match = [cmd for cmd in all_cmds if cmd == query]
        if len(match) == 1:
            self.msg(format_help_entry(match[0].key,
                                       match[0].__doc__,
                                       aliases=match[0].aliases,
                                       suggested=suggestions))
            return

        # try an exact database help entry match
        match = list(HelpEntry.objects.find_topicmatch(query, exact=True))
        if len(match) == 1:
            self.msg(format_help_entry(match[0].key,
                                       match[0].entrytext,
                                       suggested=suggestions))
            return

        # try to see if a category name was entered
        if query in all_categories:
            self.msg(format_help_list({query: [cmd.key for cmd in all_cmds if cmd.help_category == query]},
                                      {query: [topic.key for topic in all_topics if topic.help_category == query]}))
            return

        # no exact matches found. Just give suggestions.
        self.msg(format_help_entry("", "No help entry found for '%s'" % query, None, suggested=suggestions))
class CmdSetHelp(MuxCommand):
    """
    edit the help database

    Usage:
      @help[/switches] <topic>[,category[,locks]] = <text>

    Switches:
      add - add or replace a new topic with text.
      append - add text to the end of topic with a newline between.
      merge - As append, but don't add a newline between the old
              text and the appended text.
      delete - remove help topic.
      force - (used with add) create help topic also if the topic
              already exists.

    Examples:
      @sethelp/add throw = This throws something at ...
      @sethelp/append pickpocketing,Thievery = This steals ...
      @sethelp/append pickpocketing, ,attr(is_thief) = This steals ...

    This command manipulates the help database. A help entry can be created,
    appended/merged to and deleted. If you don't assign a category, the
    "General" category will be used. If no lockstring is specified, default
    is to let everyone read the help file.
    """
    key = "@help"
    aliases = "@sethelp"
    locks = "cmd:perm(PlayerHelpers)"
    help_category = "Building"

    def func(self):
        "Implement the function"
        switches = self.switches
        lhslist = self.lhslist

        if not self.args:
            self.msg("Usage: @sethelp/[add|del|append|merge] <topic>[,category[,locks,..] = <text>")
            return

        # Parse <topic>[,category[,locks]]. Explicit length checks replace
        # the previous bare `try/except Exception: pass`, which silently
        # swallowed any error while partially assigning these fields.
        nargs = len(lhslist)
        topicstr = lhslist[0] if nargs > 0 else ""
        if nargs > 1:
            category = lhslist[1]
            # NOTE: when exactly two parts are given this yields an empty
            # lockstring (",".join of an empty list) — matches the original
            # partial-assignment behavior.
            lockstring = ",".join(lhslist[2:])
        else:
            category = "General"
            lockstring = "view:all()"

        if not topicstr:
            self.msg("You have to define a topic!")
            return
        # check if we have an old entry with the same name
        try:
            old_entry = HelpEntry.objects.get(db_key__iexact=topicstr)
        except Exception:
            # no single matching entry (not found, or multiple matches)
            old_entry = None

        if 'append' in switches or "merge" in switches:
            # merge/append operations
            if not old_entry:
                self.msg("Could not find topic '%s'. You must give an exact name." % topicstr)
                return
            if not self.rhs:
                self.msg("You must supply text to append/merge.")
                return
            if 'merge' in switches:
                old_entry.entrytext += " " + self.rhs
            else:
                old_entry.entrytext += "\n%s" % self.rhs
            self.msg("Entry updated:\n%s" % old_entry.entrytext)
            return
        if 'delete' in switches or 'del' in switches:
            # delete the help entry
            if not old_entry:
                self.msg("Could not find topic '%s'" % topicstr)
                return
            old_entry.delete()
            self.msg("Deleted help entry '%s'." % topicstr)
            return

        # at this point it means we want to add a new help entry.
        if not self.rhs:
            self.msg("You must supply a help text to add.")
            return
        if old_entry:
            if 'for' in switches or 'force' in switches:
                # overwrite old entry
                old_entry.key = topicstr
                old_entry.entrytext = self.rhs
                old_entry.help_category = category
                old_entry.locks.clear()
                old_entry.locks.add(lockstring)
                old_entry.save()
                self.msg("Overwrote the old topic '%s' with a new one." % topicstr)
            else:
                self.msg("Topic '%s' already exists. Use /force to overwrite or /append or /merge to add text to it." % topicstr)
        else:
            # no old entry. Create a new one.
            new_entry = create.create_help_entry(topicstr,
                                                 self.rhs, category, lockstring)
            if new_entry:
                self.msg("Topic '%s' was successfully created." % topicstr)
            else:
                self.msg("Error when creating topic '%s'! Contact an admin." % topicstr)
| |
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Alexis Mignon <alexis.mignon@gmail.com>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
#
#
# Modified by Neil Marchant <ngmarchant@gmail.com> from the 0.18.X branch.
# Modifications include:
# * Removing Lasso* methods
# * Removing MultiTask* methods
# * Linking to prox_fast extension rather than cd_fast
# * Adding parameters `init_step` and `eta` which are required by the proximal
# gradient method with backtracking
# * Removing parameters associated with coordinate descent: e.g. cyclic, random
# * Removing positive constraint option
#
# License: BSD clause 3
# stdlib
import sys
import warnings
from distutils.version import LooseVersion

# third-party
import numpy as np
from scipy import sparse
from sklearn import __version__ as sklearn_version
new_sklearn_version = (LooseVersion(sklearn_version) > '0.17.1')
if new_sklearn_version:
from sklearn.exceptions import ConvergenceWarning
else:
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.base import (LinearModel, _pre_fit)
from sklearn.base import RegressorMixin
from sklearn.utils.validation import (check_X_y, check_array)
from sklearn.externals.six.moves import xrange
# Cross-validation
if new_sklearn_version:
    from sklearn.model_selection import check_cv
    from sklearn.linear_model.base import _preprocess_data
else:
    from sklearn.cross_validation import check_cv
    from sklearn.linear_model.base import (center_data, sparse_center_data)
    # Map _preprocess_data to old functions
    def _preprocess_data(X, y, fit_intercept, normalize, copy = True,
                         return_mean = False):
        # Compatibility shim emulating the >= 0.18 _preprocess_data API on
        # top of the pre-0.18 centering helpers.
        # NOTE(review): return_mean=True always routes to sparse_center_data
        # here — presumably only reached for sparse X (see _alpha_grid's
        # sparse workaround); confirm before using with dense input.
        if not return_mean:
            return center_data(X, y, fit_intercept, normalize, copy = False)
        else:
            return sparse_center_data(X, y, fit_intercept, normalize)
from abc import ABCMeta, abstractmethod
from sklearn.externals import six
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils.validation import column_or_1d
from . import prox_fast
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray, shape (n_samples,)
        Target values

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.

    l1_ratio : float
        The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
        l1_ratio = 1`` it is an L1 penalty.  For ``0 < l1_ratio <
        1``, the penalty is a combination of L1 and L2.

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean, default True
        Whether to fit an intercept or not

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Returns
    -------
    ndarray, shape (n_alphas,)
        Alpha values in decreasing order, log-spaced from alpha_max down to
        alpha_max * eps.
    """
    n_samples = len(y)

    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, 'csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
                                             normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)

        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
                                                          normalize,
                                                          return_mean=True)
            mean_dot = X_offset * np.sum(y)

    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]

    if sparse_center:
        # Apply the centering/scaling corrections to Xy in place.
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_scale[:, np.newaxis]

    # Smallest alpha for which all coefficients are zero.
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))

    if alpha_max <= np.finfo(float).resolution:
        # Degenerate case (e.g. y ~ 0): return a constant tiny grid.
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas

    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values

    train : list of indices
        The indices of the train set

    test : list of indices
        The indices of the test set

    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature

    path_params : dictionary
        Parameters passed to the path function

    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'

    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2

    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies

    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies

    Returns
    -------
    ndarray, shape (n_alphas,)
        Mean squared error on the test fold for each alpha on the path.
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']

    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False

    X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)

    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_offset'] = X_offset
    path_params['X_scale'] = X_scale
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas

    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio

    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    del X_train, y_train

    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_offset = np.atleast_1d(y_offset)
        y_test = y_test[:, np.newaxis]

    if normalize:
        # Undo the feature scaling so coefficients apply to raw X_test.
        nonzeros = np.flatnonzero(X_scale)
        coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]

    intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)

    return this_mses
### My code ###
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, eta=0.5, init_step=10,
              adaptive_step=True, n_alphas=100, alphas=None, precompute='auto',
              Xy=None, copy_X=True, coef_init=None, verbose=False,
              return_n_iter=False, check_input=True, **params):
    """Compute elastic net path with the proximal gradient method

    The optimization function is::

        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values

    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso

    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    eta : float, optional
        Shrinkage parameter for backtracking line search. It must satisfy
        0 < eta < 1.

    init_step : float, optional
        Initial step size used for the backtracking line search. It must be a
        positive number.

    adaptive_step : boolean, optional, default True
        Whether to calculate the optimal step size or use an adaptive step size
        chosen through a backtracking line search.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the proximal gradient solver.

    return_n_iter : bool
        whether to return the number of iterations or not.

    check_input : bool, default True
        Skip input validation checks, including the Gram matrix when provided
        assuming there are handled by the caller when check_input=False.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the proximal gradient optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    See also
    --------
    ElasticNet
    ElasticNetCV
    """
    # Direct prox_fast to use fixed optimal step size by passing eta = 0 and
    # init_step = 0 (which would otherwise be invalid)
    if not adaptive_step:
        eta_ = 0
        init_step_ = 0
    else:
        eta_ = eta
        init_step_ = init_step

    # We expect X and y to be already float64 Fortran ordered when bypassing
    # checks
    if check_input:
        X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            # Xy should be a 1d contiguous array or a 2D C ordered array
            Xy = check_array(Xy, dtype=np.float64, order='C', copy=False,
                             ensure_2d=False)

    n_samples, n_features = X.shape

    # MultiTaskElasticNet does not support sparse matrices
    if sparse.isspmatrix(X):
        if 'X_offset' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_offset'] / params['X_scale']
        else:
            X_sparse_scaling = np.zeros(n_features)

    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if check_input:
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False, copy=False)
    if alphas is None:
        # No need to normalize of fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    n_alphas = len(alphas)
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 10000)

    dual_gaps = np.empty(n_alphas)
    n_iters = []

    coefs = np.empty((n_features, n_alphas), dtype=np.float64)

    if coef_init is None:
        # warm-start each alpha from the previous solution, starting at zero
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)

    for i, alpha in enumerate(alphas):
        # scale penalties to match the solver's unnormalized objective
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        if sparse.isspmatrix(X):
            # NOTE(review): the sparse solver is not passed eta_/init_step_ —
            # presumably handled internally; confirm against the prox_fast
            # extension's signature.
            model = prox_fast.sparse_enet_prox_gradient(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, dtype=np.float64,
                                         order='C')
            model = prox_fast.enet_prox_gradient_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter, eta_,
                init_step_, tol)
        elif precompute is False:
            model = prox_fast.enet_prox_gradient(
                coef_, l1_reg, l2_reg, X, y, max_iter, eta_, init_step_, tol)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, tol_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        if dual_gap_ > tol_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                # progress dot per alpha
                sys.stderr.write('.')

    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
class ElasticNet(LinearModel, RegressorMixin) :
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
eta : float, optional
Shrinkage parameter for backtracking line search. It must satisfy
0 < eta < 1.
init_step : float, optional
Initial step size used for the backtracking line search. It must be a
positive number.
adaptive_step : boolean, optional, default True
Whether to calculate the optimal step size or use an adaptive step size
chosen through a backtracking line search.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, eta=0.5, init_step=10,
fit_intercept=True, adaptive_step=True, normalize=False,
precompute=False, max_iter=10000, copy_X=True, tol=1e-4,
warm_start=False):
# Initialise parameters
self.alpha = alpha
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.eta = eta
self.adaptive_step = adaptive_step
self.init_step = init_step
self.warm_start = warm_start
self.coef_ = None
self.intercept_ = 0.0
    def fit(self, X, y, check_input=True):
        """Fit the model for the single configured ``alpha``.

        Parameters
        ----------
        X : ndarray or scipy.sparse matrix, shape (n_samples, n_features)
            Training data. Pass float64 Fortran-contiguous data to avoid
            an extra copy.
        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        check_input : bool, default True
            When False, skip input validation (caller guarantees layout).

        Returns
        -------
        self : object
        """
        # alpha == 0 degenerates to unpenalized least squares, for which this
        # solver converges poorly; warn rather than fail.
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        if (isinstance(self.precompute, six.string_types) and
                self.precompute == 'auto'):
            warnings.warn("Setting precompute to 'auto', was found to be "
                          "slower even when n_samples > n_features. Hence "
                          "it will be removed in 0.18.",
                          DeprecationWarning, stacklevel=2)
        # Invalid line-search parameters are reset to their defaults with a
        # warning (note: this mutates the public attributes) instead of raising.
        if not (self.eta > 0 and self.eta < 1):
            self.eta = 0.5
            warnings.warn("Value given for eta is invalid. It must satisfy the "
                          "constraint 0 < eta < 1. Setting eta to the default "
                          "value (0.5).",
                          stacklevel = 2)
        if not (self.init_step > 0):
            self.init_step = 10
            warnings.warn("Value given for init_step is invalid. It must be "
                          "a positive number. Setting init_step to the default "
                          "value (10).",
                          stacklevel = 2)
        if check_input:
            # Ensure that X and y are float64 Fortran ordered arrays.
            # Also check for consistency in the dimensions, and that y doesn't
            # contain np.nan or np.inf entries.
            y = np.asarray(y, dtype=np.float64)
            X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
                             order='F',
                             copy=self.copy_X and self.fit_intercept,
                             multi_output=True, y_numeric=True)
            y = check_array(y, dtype=np.float64, order='F', copy=False,
                            ensure_2d=False)
        # Centre and normalise the data
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=False)
        # Work internally with 2-D targets; squeeze back at the end.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        if not self.warm_start or self.coef_ is None:
            # Initial guess for coef_ is zero
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            # Use previous value of coef_ as initial guess
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype = np.float64)
        self.n_iter_ = []
        # Perform the optimisation
        # NOTE(review): ``xrange`` assumes Python 2 or a ``six.moves``-style
        # import at module level -- confirm it is in scope under Python 3.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            # One regularization-path call per target, restricted to the
            # single alpha configured on this estimator.
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None, eta = self.eta,
                          init_step = self.init_step, n_alphas=None,
                          alphas=[self.alpha], precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, adaptive_step=self.adaptive_step,
                          normalize=False, copy_X=True, verbose=False, tol=self.tol,
                          X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          check_input=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])
        # Scalar rather than a one-element list for a single target.
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]
        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_offset, y_offset, X_scale)
        # return self for chaining fit and predict calls
        return self
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path"""
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=10000, tol=1e-4,
                 eta=0.5, init_step=10, adaptive_step=True, copy_X=True,
                 cv=None, verbose=False, n_jobs=1):
        # Hyper-parameters are stored verbatim; validation is deferred to
        # ``fit`` (scikit-learn estimator convention).
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.eta = eta
        self.init_step = init_step
        self.adaptive_step = adaptive_step
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
    def fit(self, X, y):
        """Fit linear model with coordinate descent
        Fit is on grid of alphas and best alpha estimated by cross-validation.
        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values
        """
        y = np.asarray(y, dtype=np.float64)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # Invalid line-search parameters are reset to defaults with a warning
        # (mutating the public attributes) rather than raising.
        if not (self.eta > 0 and self.eta < 1):
            self.eta = 0.5
            warnings.warn("Value given for eta is invalid. It must satisfy the "
                          "constraint 0 < eta < 1. Setting eta to the default "
                          "value (0.5).",
                          stacklevel = 2)
        if not (self.init_step > 0):
            self.init_step = 10
            warnings.warn("Value given for init_step is invalid. It must be "
                          "a positive number. Setting init_step to the default "
                          "value (10).",
                          stacklevel = 2)
        # The presence of ``l1_ratio`` distinguishes ElasticNet-style
        # subclasses from Lasso-style ones.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            # Non-(ElasticNetCV/LassoCV) subclasses only raise here.
            if sparse.isspmatrix(X):
                # NOTE(review): message renders as "...matrix waspassed" --
                # the two adjacent literals are missing a separating space.
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            else:
                raise ValueError("Multi-task outputs not supported")
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                        not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # One automatically-computed alpha grid per l1_ratio.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False
        # init cross-validation generator
        # ``new_sklearn_version`` toggles between the old and new check_cv /
        # splitter APIs (assumed defined at module level).
        if new_sklearn_version:
            cv = check_cv(self.cv)
        else:
            cv = check_cv(self.cv, X)
        # Compute path for all folds and compute MSE to get the best alpha
        if new_sklearn_version:
            folds = list(cv.split(X))
        else:
            folds = list(cv)
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        # Average over folds (axis 1) to score each (l1_ratio, alpha) pair.
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.precompute = False
        model.fit(X, y)
        # Lasso-style subclasses have no public l1_ratio; drop the fitted one.
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net with built-in cross-validation along a regularization path.

    The alpha (and, when several are given, l1_ratio) with the lowest mean
    squared error across the CV folds is selected, and the model is refit
    on the full data with those values.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    l1_ratio : float or array of floats, optional
        Mixing parameter passed to ElasticNet, with ``0 <= l1_ratio <= 1``:
        0 gives a pure L2 penalty, 1 a pure L1 penalty, values in between a
        combination of both. A list of values is searched by
        cross-validation; a good list puts more values close to 1 (Lasso)
        than to 0 (Ridge), e.g. ``[.1, .5, .7, .9, .95, .99, 1]``.
    eps : float, optional
        Length of the path: ``eps=1e-3`` means
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path, per l1_ratio.
    alphas : numpy array, optional
        Explicit list of alphas where to compute the models; set
        automatically when None.
    eta : float, optional
        Shrinkage parameter for the backtracking line search; must satisfy
        0 < eta < 1.
    init_step : float, optional
        Initial step size for the backtracking line search; must be a
        positive number.
    adaptive_step : boolean, optional, default True
        Whether to calculate the optimal step size or use an adaptive step
        size chosen through a backtracking line search.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up calculations.
        'auto' lets the estimator decide; an array is used directly.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        Optimization tolerance: when updates fall below ``tol`` the dual
        gap is checked for optimality and optimization continues until it
        is also smaller than ``tol``.
    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy: None for the default 3-fold
        CV, an integer for that many folds (:class:`KFold`), a CV
        generator, or an iterable of train/test splits. See the
        :ref:`User Guide <cross_validation>` for available strategies.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs used during cross validation; ``-1`` uses all CPUs.
    fit_intercept : boolean
        Whether to calculate the intercept for this model; set to False
        when the data is expected to be already centered.
    normalize : boolean, optional, default False
        If True, regressors are normalized before regression (ignored when
        ``fit_intercept`` is False). For standardization, use
        ``preprocessing.StandardScaler`` before fitting with
        ``normalize=False``.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation.
    l1_ratio_ : float
        The l1/l2 compromise chosen by cross validation.
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets, n_features)
        Independent term in the decision function.
    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error on the test set of each fold, varying l1_ratio
        and alpha.
    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.
    n_iter_ : int
        Number of iterations run by the solver to reach the specified
        tolerance for the optimal alpha.

    Notes
    -----
    To avoid unnecessary memory duplication, pass X to ``fit`` as a
    Fortran-contiguous numpy array. In glmnet (R) terms, l1_ratio
    corresponds to alpha and alpha to lambda. The optimization objective
    is::

        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    which is equivalent to ``a * L1 + b * L2`` with ``alpha = a + b`` and
    ``l1_ratio = a / (a + b)``.

    See also
    --------
    enet_path
    ElasticNet
    """
    # Regularization-path routine used by LinearModelCV.fit.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=10000, tol=1e-4, eta=0.5, init_step=10,
                 adaptive_step=True, cv=None, copy_X=True, verbose=0,
                 n_jobs=1):
        """Store the hyper-parameters; validation happens in ``fit``."""
        # Path / grid configuration.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        # Line-search controls.
        self.eta = eta
        self.init_step = init_step
        self.adaptive_step = adaptive_step
        # Data handling.
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        # Stopping criteria.
        self.max_iter = max_iter
        self.tol = tol
        # Cross-validation and execution.
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from unittest.mock import MagicMock, Mock, patch
import pytest
from pytest import fixture
from airflow.exceptions import AirflowException
from airflow.models.connection import Connection
from airflow.providers.microsoft.azure.hooks.data_factory import (
AzureDataFactoryHook,
AzureDataFactoryPipelineRunException,
AzureDataFactoryPipelineRunStatus,
provide_targeted_factory,
)
from airflow.utils import db
# Shared fixtures for the Azure Data Factory hook tests: the DEFAULT_* values
# come from the test connection's ``extra`` JSON; the non-default values are
# passed explicitly by the "explicit factory" parametrizations.
DEFAULT_RESOURCE_GROUP = "defaultResourceGroup"
RESOURCE_GROUP = "testResourceGroup"
DEFAULT_FACTORY = "defaultFactory"
FACTORY = "testFactory"
MODEL = object()  # opaque stand-in for any SDK model object
NAME = "testName"
ID = "testId"
def setup_module():
    """Register the Airflow connection used by every test in this module.

    The connection's ``extra`` JSON carries the default resource group and
    factory names the hook falls back to when none are passed explicitly.
    """
    extra_config = {
        "extra__azure_data_factory__tenantId": "tenantId",
        "extra__azure_data_factory__subscriptionId": "subscriptionId",
        "extra__azure_data_factory__resource_group_name": DEFAULT_RESOURCE_GROUP,
        "extra__azure_data_factory__factory_name": DEFAULT_FACTORY,
    }
    db.merge_conn(
        Connection(
            conn_id="azure_data_factory_test",
            conn_type="azure_data_factory",
            login="clientId",
            password="clientSecret",
            extra=json.dumps(extra_config),
        )
    )
@fixture
def hook():
    """Return an ``AzureDataFactoryHook`` whose SDK client is a ``MagicMock``.

    The mock is restricted to the operation groups the tests exercise so
    that accessing anything else raises immediately.
    """
    mocked_operation_groups = [
        "factories",
        "linked_services",
        "datasets",
        "pipelines",
        "pipeline_runs",
        "triggers",
        "trigger_runs",
    ]
    adf_hook = AzureDataFactoryHook(azure_data_factory_conn_id="azure_data_factory_test")
    adf_hook._conn = MagicMock(spec=mocked_operation_groups)
    return adf_hook
def parametrize(explicit_factory, implicit_factory):
    """Parametrize a test with explicit- and implicit-factory argument sets.

    Each argument is a ``(user_args, sdk_args)`` pair: the positional
    arguments the test passes to the hook, and the arguments the mocked
    SDK operation is expected to receive.
    """
    def wrapper(func):
        marker = pytest.mark.parametrize(
            ("user_args", "sdk_args"),
            (explicit_factory, implicit_factory),
            ids=("explicit factory", "implicit factory"),
        )
        return marker(func)
    return wrapper
def test_provide_targeted_factory():
    """``provide_targeted_factory`` fills in missing resource group/factory.

    Explicit arguments always win; omitted ones fall back, per argument, to
    the connection's ``extra`` defaults; no value from either source raises.
    """
    def echo(_, resource_group_name=None, factory_name=None):
        return resource_group_name, factory_name
    conn = MagicMock()
    hook = MagicMock()
    hook.get_connection.return_value = conn
    # No defaults configured: both values must be passed explicitly.
    conn.extra_dejson = {}
    assert provide_targeted_factory(echo)(hook, RESOURCE_GROUP, FACTORY) == (RESOURCE_GROUP, FACTORY)
    # Defaults configured: omitted/None arguments fall back individually.
    conn.extra_dejson = {
        "extra__azure_data_factory__resource_group_name": DEFAULT_RESOURCE_GROUP,
        "extra__azure_data_factory__factory_name": DEFAULT_FACTORY,
    }
    assert provide_targeted_factory(echo)(hook) == (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)
    assert provide_targeted_factory(echo)(hook, RESOURCE_GROUP, None) == (RESOURCE_GROUP, DEFAULT_FACTORY)
    assert provide_targeted_factory(echo)(hook, None, FACTORY) == (DEFAULT_RESOURCE_GROUP, FACTORY)
    assert provide_targeted_factory(echo)(hook, None, None) == (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)
    # Neither an argument nor a configured default: error.
    with pytest.raises(AirflowException):
        conn.extra_dejson = {}
        provide_targeted_factory(echo)(hook)
# ---- Factory CRUD ----------------------------------------------------------
# Each test drives a hook method with ``user_args`` and verifies the mocked
# SDK operation received ``sdk_args`` (see the ``parametrize`` helper above).
@parametrize(
    explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
    implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_get_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.get_factory(*user_args)
    hook._conn.factories.get.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
    implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_create_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.create_factory(*user_args)
    hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
    implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
    # Update only proceeds when the factory already exists.
    hook._factory_exists = Mock(return_value=True)
    hook.update_factory(*user_args)
    hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
    implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook._factory_exists = Mock(return_value=False)
    with pytest.raises(AirflowException, match=r"Factory .+ does not exist"):
        hook.update_factory(*user_args)
@parametrize(
    explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
    implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_delete_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.delete_factory(*user_args)
    hook._conn.factories.delete.assert_called_with(*sdk_args)
# ---- Linked services -------------------------------------------------------
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.get_linked_service(*user_args)
    hook._conn.linked_services.get.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
    """create_linked_service forwards the expected arguments to the SDK."""
    hook.create_linked_service(*user_args)
    # BUG FIX: the original *called* the mock (``create_or_update(*sdk_args)``)
    # instead of asserting on it, so this test could never fail.
    hook._conn.linked_services.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
    """update_linked_service upserts via the SDK when the service exists."""
    hook._linked_service_exists = Mock(return_value=True)
    hook.update_linked_service(*user_args)
    # BUG FIX: the original *called* the mock (``create_or_update(*sdk_args)``)
    # instead of asserting on it, so this test could never fail.
    hook._conn.linked_services.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
    # Updating a linked service that does not exist must raise.
    hook._linked_service_exists = Mock(return_value=False)
    with pytest.raises(AirflowException, match=r"Linked service .+ does not exist"):
        hook.update_linked_service(*user_args)
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.delete_linked_service(*user_args)
    hook._conn.linked_services.delete.assert_called_with(*sdk_args)
# ---- Datasets --------------------------------------------------------------
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.get_dataset(*user_args)
    hook._conn.datasets.get.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.create_dataset(*user_args)
    hook._conn.datasets.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
    # Update only proceeds when the dataset already exists.
    hook._dataset_exists = Mock(return_value=True)
    hook.update_dataset(*user_args)
    hook._conn.datasets.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_dataset_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook._dataset_exists = Mock(return_value=False)
    with pytest.raises(AirflowException, match=r"Dataset .+ does not exist"):
        hook.update_dataset(*user_args)
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.delete_dataset(*user_args)
    hook._conn.datasets.delete.assert_called_with(*sdk_args)
# ---- Pipelines and pipeline runs -------------------------------------------
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.get_pipeline(*user_args)
    hook._conn.pipelines.get.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.create_pipeline(*user_args)
    hook._conn.pipelines.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
    # Update only proceeds when the pipeline already exists.
    hook._pipeline_exists = Mock(return_value=True)
    hook.update_pipeline(*user_args)
    hook._conn.pipelines.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_pipeline_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook._pipeline_exists = Mock(return_value=False)
    with pytest.raises(AirflowException, match=r"Pipeline .+ does not exist"):
        hook.update_pipeline(*user_args)
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.delete_pipeline(*user_args)
    hook._conn.pipelines.delete.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_run_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.run_pipeline(*user_args)
    hook._conn.pipelines.create_run.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, ID)),
    implicit_factory=((ID,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, ID)),
)
def test_get_pipeline_run(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.get_pipeline_run(*user_args)
    hook._conn.pipeline_runs.get.assert_called_with(*sdk_args)
# Cases for test_wait_for_pipeline_run_status: each tuple is
# (reported run status, status(es) waited for, expected result), where
# "timeout" means the wait is expected to time out and raise.
_wait_for_pipeline_run_status_test_args = [
    (AzureDataFactoryPipelineRunStatus.SUCCEEDED, AzureDataFactoryPipelineRunStatus.SUCCEEDED, True),
    (AzureDataFactoryPipelineRunStatus.FAILED, AzureDataFactoryPipelineRunStatus.SUCCEEDED, False),
    (AzureDataFactoryPipelineRunStatus.CANCELLED, AzureDataFactoryPipelineRunStatus.SUCCEEDED, False),
    (AzureDataFactoryPipelineRunStatus.IN_PROGRESS, AzureDataFactoryPipelineRunStatus.SUCCEEDED, "timeout"),
    (AzureDataFactoryPipelineRunStatus.QUEUED, AzureDataFactoryPipelineRunStatus.SUCCEEDED, "timeout"),
    (AzureDataFactoryPipelineRunStatus.CANCELING, AzureDataFactoryPipelineRunStatus.SUCCEEDED, "timeout"),
    (AzureDataFactoryPipelineRunStatus.SUCCEEDED, AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES, True),
    (AzureDataFactoryPipelineRunStatus.FAILED, AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES, True),
    (AzureDataFactoryPipelineRunStatus.CANCELLED, AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES, True),
]
@pytest.mark.parametrize(
    argnames=("pipeline_run_status", "expected_status", "expected_output"),
    argvalues=_wait_for_pipeline_run_status_test_args,
    ids=[
        f"run_status_{argval[0]}_expected_{argval[1]}"
        if isinstance(argval[1], str)
        else f"run_status_{argval[0]}_expected_AnyTerminalStatus"
        for argval in _wait_for_pipeline_run_status_test_args
    ],
)
def test_wait_for_pipeline_run_status(hook, pipeline_run_status, expected_status, expected_output):
    """Waiting returns whether the run hit an expected status, or times out.

    ``get_pipeline_run`` is patched to always report ``pipeline_run_status``;
    non-terminal statuses therefore never progress and must raise after the
    short configured timeout.
    """
    config = {"run_id": ID, "timeout": 3, "check_interval": 1, "expected_statuses": expected_status}
    with patch.object(AzureDataFactoryHook, "get_pipeline_run") as mock_pipeline_run:
        mock_pipeline_run.return_value.status = pipeline_run_status
        if expected_output != "timeout":
            assert hook.wait_for_pipeline_run_status(**config) == expected_output
        else:
            with pytest.raises(AzureDataFactoryPipelineRunException):
                hook.wait_for_pipeline_run_status(**config)
@parametrize(
explicit_factory=((ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, ID)),
implicit_factory=((ID,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, ID)),
)
def test_cancel_pipeline_run(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.cancel_pipeline_run(*user_args)
hook._conn.pipeline_runs.cancel.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_trigger(*user_args)
hook._conn.triggers.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_trigger(*user_args)
hook._conn.triggers.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._trigger_exists = Mock(return_value=True)
hook.update_trigger(*user_args)
hook._conn.triggers.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_trigger_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._trigger_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Trigger .+ does not exist"):
hook.update_trigger(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_trigger(*user_args)
hook._conn.triggers.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_start_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.start_trigger(*user_args)
hook._conn.triggers.begin_start.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_stop_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.stop_trigger(*user_args)
hook._conn.triggers.begin_stop.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, ID)),
implicit_factory=((NAME, ID), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, ID)),
)
def test_rerun_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.rerun_trigger(*user_args)
hook._conn.trigger_runs.rerun.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, ID)),
    implicit_factory=((NAME, ID), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, ID)),
)
def test_cancel_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
    """cancel_trigger targets a specific run (name + run id) via trigger_runs."""
    hook.cancel_trigger(*user_args)
    hook._conn.trigger_runs.cancel.assert_called_with(*sdk_args)
| |
"""Figure 8 - Df(16)A parameter fits and enrichment model"""
FIG_FORMAT = 'svg'
import matplotlib as mpl
if FIG_FORMAT == 'svg':
mpl.use('agg')
elif FIG_FORMAT == 'pdf':
mpl.use('pdf')
elif FIG_FORMAT == 'interactive':
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import cPickle as pickle
import seaborn.apionly as sns
import lab.misc as misc
import lab.misc.splines as splines
import os
import sys
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..', 'enrichment_model'))
import enrichment_model_data as emd
import enrichment_model_plotting as emp
import Df16a_analysis as df
Df_color = df.Df_color
markers = df.markers
_, Df_marker = df.markers
save_dir = df.fig_save_dir
filename = 'Fig8_Df_model.{}'.format(FIG_FORMAT)
simulations_path = os.path.join(
df.data_path, 'enrichment_model',
'WT_Df_enrichment_model_simulation_C.pkl')
params_path = os.path.join(
df.data_path, 'enrichment_model', 'Df_model_params_C.pkl')
def main():
    """Build and save Figure 8 (Df(16)A parameter fits and enrichment model).

    Loads the Df enrichment-model data, fitted parameters, and simulation
    results from disk, then draws six panels:

    - place-cell recurrence probability vs. initial distance from reward
    - place-field shift vs. initial position (fit +/- fitted variance)
    - bootstrap CIs of the shift offset (b) and shift variance (1/k) fits
    - modeled enrichment across iterations and the final position
      distribution

    The figure is written to ``save_dir``/``filename`` via
    ``misc.save_figure``.
    """
    # Raw data is returned alongside the processed form, but only the
    # processed Df_data is used below.
    Df_raw_data, Df_data = emd.load_data(
        'df', root=os.path.join(df.data_path, 'enrichment_model'))
    # Use a context manager (and binary mode) instead of
    # pickle.load(open(...)), which leaked the file handle and relied on
    # text-mode reads working for binary pickles.
    with open(params_path, 'rb') as params_file:
        Df_params = pickle.load(params_file)

    fig = plt.figure(figsize=(8.5, 11))
    gs = plt.GridSpec(
        3, 2, left=0.1, bottom=0.5, right=0.5, top=0.9, wspace=0.3,
        hspace=0.3)
    Df_recur_ax = fig.add_subplot(gs[0, 0])
    Df_shift_ax = fig.add_subplot(gs[0, 1])
    shift_compare_ax = fig.add_subplot(gs[1, 0])
    var_compare_ax = fig.add_subplot(gs[1, 1])
    enrichment_ax = fig.add_subplot(gs[2, 0])
    final_enrichment_ax = fig.add_subplot(gs[2, 1])

    #
    # Recurrence by position
    #
    recur_x_vals = np.linspace(-np.pi, np.pi, 1000)
    Df_recur_data = emd.recurrence_by_position(Df_data, method='cv')
    Df_recur_knots = np.linspace(
        -np.pi, np.pi, Df_params['position_recurrence']['n_knots'])
    Df_recur_spline = splines.CyclicSpline(Df_recur_knots)
    Df_recur_N = Df_recur_spline.design_matrix(recur_x_vals)
    Df_recur_fit = splines.prob(
        Df_params['position_recurrence']['theta'], Df_recur_N)
    # 90% bootstrap band: 5th-95th percentiles of the bootstrap fits.
    Df_recur_boots_fits = [
        splines.prob(boot, Df_recur_N)
        for boot in Df_params['position_recurrence']['boots_theta']]
    Df_recur_ci_up_fit = np.percentile(Df_recur_boots_fits, 95, axis=0)
    Df_recur_ci_low_fit = np.percentile(Df_recur_boots_fits, 5, axis=0)
    Df_recur_ax.plot(recur_x_vals, Df_recur_fit, color=Df_color)
    Df_recur_ax.fill_between(
        recur_x_vals, Df_recur_ci_low_fit, Df_recur_ci_up_fit,
        facecolor=Df_color, alpha=0.5)
    sns.regplot(
        Df_recur_data[:, 0], Df_recur_data[:, 1], ax=Df_recur_ax,
        color=Df_color, y_jitter=0.2, fit_reg=False, scatter_kws={'s': 1},
        marker=Df_marker)
    Df_recur_ax.set_xlim(-np.pi, np.pi)
    Df_recur_ax.set_xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi])
    Df_recur_ax.set_xticklabels(['-0.50', '-0.25', '0', '0.25', '0.50'])
    Df_recur_ax.set_ylim(-0.3, 1.3)
    Df_recur_ax.set_yticks([0, 0.5, 1])
    Df_recur_ax.tick_params(length=3, pad=1, top=False)
    Df_recur_ax.set_xlabel('Initial distance from reward (fraction of belt)')
    Df_recur_ax.set_ylabel('Place cell recurrence probability')
    Df_recur_ax.set_title('')
    # Second y-axis labels the binary outcome behind the jittered scatter.
    Df_recur_ax_2 = Df_recur_ax.twinx()
    Df_recur_ax_2.set_ylim(-0.3, 1.3)
    Df_recur_ax_2.set_yticks([0, 1])
    Df_recur_ax_2.set_yticklabels(['non-recur', 'recur'])
    Df_recur_ax_2.tick_params(length=3, pad=1, top=False)

    #
    # Place field stability
    #
    shift_x_vals = np.linspace(-np.pi, np.pi, 1000)
    Df_shift_knots = Df_params['position_stability']['all_pairs']['knots']
    Df_shift_spline = splines.CyclicSpline(Df_shift_knots)
    Df_shift_N = Df_shift_spline.design_matrix(shift_x_vals)
    Df_shift_theta_b = Df_params['position_stability']['all_pairs']['theta_b']
    Df_shift_b_fit = np.dot(Df_shift_N, Df_shift_theta_b)
    Df_shift_theta_k = Df_params['position_stability']['all_pairs']['theta_k']
    Df_shift_k_fit = splines.get_k(Df_shift_theta_k, Df_shift_N)
    Df_shift_fit_var = 1. / Df_shift_k_fit
    Df_shift_data = emd.paired_activity_centroid_distance_to_reward(Df_data)
    Df_shift_data = Df_shift_data.dropna()
    Df_shifts = Df_shift_data['second'] - Df_shift_data['first']
    # Wrap shifts onto the circular interval [-pi, pi).
    Df_shifts[Df_shifts < -np.pi] += 2 * np.pi
    Df_shifts[Df_shifts >= np.pi] -= 2 * np.pi
    Df_shift_ax.plot(shift_x_vals, Df_shift_b_fit, color=Df_color)
    Df_shift_ax.fill_between(
        shift_x_vals, Df_shift_b_fit - Df_shift_fit_var,
        Df_shift_b_fit + Df_shift_fit_var, facecolor=Df_color, alpha=0.5)
    sns.regplot(
        Df_shift_data['first'], Df_shifts, ax=Df_shift_ax, color=Df_color,
        fit_reg=False, scatter_kws={'s': 1}, marker=Df_marker)
    Df_shift_ax.axvline(ls='--', color='0.4', lw=0.5)
    Df_shift_ax.axhline(ls='--', color='0.4', lw=0.5)
    # Green dotted diagonal: a shift exactly back to the reward location.
    Df_shift_ax.plot([-np.pi, np.pi], [np.pi, -np.pi], color='g', ls=':', lw=2)
    Df_shift_ax.tick_params(length=3, pad=1, top=False)
    Df_shift_ax.set_xlabel('Initial distance from reward (fraction of belt)')
    Df_shift_ax.set_ylabel(r'$\Delta$ position (fraction of belt)')
    Df_shift_ax.set_xlim(-np.pi, np.pi)
    Df_shift_ax.set_xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi])
    Df_shift_ax.set_xticklabels(['-0.50', '-0.25', '0', '0.25', '0.50'])
    Df_shift_ax.set_ylim(-np.pi, np.pi)
    Df_shift_ax.set_yticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi])
    Df_shift_ax.set_yticklabels(['-0.50', '-0.25', '0', '0.25', '0.50'])
    Df_shift_ax.set_title('')

    #
    # Stability by distance to reward
    #
    shift_x_vals = np.linspace(-np.pi, np.pi, 1000)
    Df_shift_knots = Df_params['position_stability']['all_pairs']['knots']
    Df_shift_spline = splines.CyclicSpline(Df_shift_knots)
    Df_shift_N = Df_shift_spline.design_matrix(shift_x_vals)
    Df_shift_theta_b = Df_params['position_stability']['all_pairs']['theta_b']
    Df_shift_b_fit = np.dot(Df_shift_N, Df_shift_theta_b)
    Df_shift_boots_b_fit = [
        np.dot(Df_shift_N, boot) for boot in
        Df_params['position_stability']['all_pairs']['boots_theta_b']]
    Df_shift_b_ci_up_fit = np.percentile(Df_shift_boots_b_fit, 95, axis=0)
    Df_shift_b_ci_low_fit = np.percentile(Df_shift_boots_b_fit, 5, axis=0)
    shift_compare_ax.plot(shift_x_vals, Df_shift_b_fit, color=Df_color)
    shift_compare_ax.fill_between(
        shift_x_vals, Df_shift_b_ci_low_fit, Df_shift_b_ci_up_fit,
        facecolor=Df_color, alpha=0.5)
    shift_compare_ax.axvline(ls='--', color='0.4', lw=0.5)
    shift_compare_ax.axhline(ls='--', color='0.4', lw=0.5)
    shift_compare_ax.tick_params(length=3, pad=1, top=False)
    shift_compare_ax.set_xlim(-np.pi, np.pi)
    shift_compare_ax.set_xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi])
    shift_compare_ax.set_xticklabels(['-0.50', '-0.25', '0', '0.25', '0.50'])
    shift_compare_ax.set_ylim(-0.10 * 2 * np.pi, 0.10 * 2 * np.pi)
    # Tick labels are in belt fractions; positions are in radians.
    y_ticks = np.array(['-0.10', '-0.05', '0', '0.05', '0.10'])
    shift_compare_ax.set_yticks(y_ticks.astype('float') * 2 * np.pi)
    shift_compare_ax.set_yticklabels(y_ticks)
    shift_compare_ax.set_xlabel(
        'Initial distance from reward (fraction of belt)')
    shift_compare_ax.set_ylabel(r'$\Delta$ position (fraction of belt)')
    Df_shift_theta_k = Df_params['position_stability']['all_pairs']['theta_k']
    Df_shift_k_fit = splines.get_k(Df_shift_theta_k, Df_shift_N)
    Df_shift_boots_k_fit = [splines.get_k(
        boot, Df_shift_N) for boot in
        Df_params['position_stability']['all_pairs']['boots_theta_k']]
    Df_shift_k_ci_up_fit = np.percentile(Df_shift_boots_k_fit, 95, axis=0)
    Df_shift_k_ci_low_fit = np.percentile(Df_shift_boots_k_fit, 5, axis=0)
    # Variance is 1/k, so the upper variance bound comes from the lower k
    # bound and vice versa.
    var_compare_ax.plot(shift_x_vals, 1. / Df_shift_k_fit, color=Df_color)
    var_compare_ax.fill_between(
        shift_x_vals, 1. / Df_shift_k_ci_low_fit, 1. / Df_shift_k_ci_up_fit,
        facecolor=Df_color, alpha=0.5)
    var_compare_ax.axvline(ls='--', color='0.4', lw=0.5)
    var_compare_ax.tick_params(length=3, pad=1, top=False)
    var_compare_ax.set_xlim(-np.pi, np.pi)
    var_compare_ax.set_xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi])
    var_compare_ax.set_xticklabels(['-0.50', '-0.25', '0', '0.25', '0.50'])
    y_ticks = np.array(['0.005', '0.010', '0.015', '0.020'])
    var_compare_ax.set_yticks(y_ticks.astype('float') * (2 * np.pi) ** 2)
    var_compare_ax.set_yticklabels(y_ticks)
    var_compare_ax.set_xlabel(
        'Initial distance from reward (fraction of belt)')
    var_compare_ax.set_ylabel(r'$\Delta$ position variance')

    #
    # Enrichment
    #
    # Same leak fix as above for the simulation pickle.
    with open(simulations_path, 'rb') as sim_file:
        m = pickle.load(sim_file)
    Df_enrich = emp.calc_enrichment(m['Df_no_swap_pos'], m['Df_no_swap_masks'])
    emp.plot_enrichment(
        enrichment_ax, Df_enrich, Df_color, title='', rad=False)
    enrichment_ax.set_xlabel("Iteration ('session' #)")

    #
    # Final Enrichment
    #
    Df_no_swap_final_dist = emp.calc_final_distributions(
        m['Df_no_swap_pos'], m['Df_no_swap_masks'])
    emp.plot_final_distributions(
        final_enrichment_ax, [Df_no_swap_final_dist], [Df_color], title='',
        rad=False)

    misc.save_figure(fig, filename, save_dir=save_dir)
    plt.close('all')
if __name__ == '__main__':
    # Generate and save the figure when run as a script.
    main()
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import itertools
import eventlet
import six
from heat.common.i18n import repr_wrapper
from heat.common import timeutils
from heat.engine import dependencies
from heat.engine import scheduler
from heat.tests import common
class DummyTask(object):
    """A fake task co-routine: runs ``num_steps`` steps, yielding a delay
    after each one.

    ``do_step`` is a no-op hook intended to be stubbed out by tests so
    that the exact sequence of step calls can be asserted. ``delays``
    optionally supplies the value yielded after each step (defaults to
    an endless stream of ``None``).
    """

    def __init__(self, num_steps=3, delays=None):
        self.num_steps = num_steps
        # None means "no delay, forever"; otherwise consume the given
        # iterable one value per step.
        self.delays = itertools.repeat(None) if delays is None else iter(delays)

    def __call__(self, *args, **kwargs):
        step = 1
        while step <= self.num_steps:
            self.do_step(step, *args, **kwargs)
            yield next(self.delays)
            step += 1

    def do_step(self, step_num, *args, **kwargs):
        # Overridden/stubbed by tests; intentionally does nothing.
        pass
class ExceptionGroupTest(common.HeatTestCase):
    """Behaviour of scheduler.ExceptionGroup's exception collection."""

    def test_contains_exceptions(self):
        group = scheduler.ExceptionGroup()
        self.assertIsInstance(group.exceptions, list)

    def test_can_be_initialized_with_a_list_of_exceptions(self):
        first = Exception("ex 1")
        second = Exception("ex 2")
        group = scheduler.ExceptionGroup([first, second])
        self.assertIn(first, group.exceptions)
        self.assertIn(second, group.exceptions)

    def test_can_add_exceptions_after_init(self):
        group = scheduler.ExceptionGroup()
        extra = Exception()
        group.exceptions.append(extra)
        self.assertIn(extra, group.exceptions)

    def test_str_representation_aggregates_all_exceptions(self):
        group = scheduler.ExceptionGroup(
            [Exception("ex 1"), Exception("ex 2")])
        self.assertEqual("['ex 1', 'ex 2']", six.text_type(group))
class DependencyTaskGroupTest(common.HeatTestCase):
    """Verify DependencyTaskGroup runs DummyTask steps in dependency order.

    Each test records the expected ``do_step`` calls (mox record phase)
    inside the ``_dep_test`` context; on exit the expectations are
    replayed and the whole group is run to completion.
    """

    def setUp(self):
        super(DependencyTaskGroupTest, self).setUp()
        self.addCleanup(self.m.VerifyAll)
        # Per-test knobs consumed by _dep_test when building the group.
        self.aggregate_exceptions = False
        self.error_wait_time = None
        self.reverse_order = False

    @contextlib.contextmanager
    def _dep_test(self, *edges):
        # Step count defaults to 3 unless a test sets self.steps.
        dummy = DummyTask(getattr(self, 'steps', 3))
        deps = dependencies.Dependencies(edges)
        tg = scheduler.DependencyTaskGroup(
            deps, dummy, reverse=self.reverse_order,
            error_wait_time=self.error_wait_time,
            aggregate_exceptions=self.aggregate_exceptions)
        self.m.StubOutWithMock(dummy, 'do_step')
        # Caller records its do_step expectations here (mox record phase).
        yield dummy
        self.m.ReplayAll()
        scheduler.TaskRunner(tg)(wait_time=None)

    def test_no_steps(self):
        # Zero steps: no do_step calls expected at all.
        self.steps = 0
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        with self._dep_test(('second', 'first')):
            pass

    def test_single_node(self):
        with self._dep_test(('only', None)) as dummy:
            dummy.do_step(1, 'only').AndReturn(None)
            dummy.do_step(2, 'only').AndReturn(None)
            dummy.do_step(3, 'only').AndReturn(None)

    def test_disjoint(self):
        # Independent nodes may interleave within a step (InAnyOrder group).
        with self._dep_test(('1', None), ('2', None)) as dummy:
            dummy.do_step(1, '1').InAnyOrder('1')
            dummy.do_step(1, '2').InAnyOrder('1')
            dummy.do_step(2, '1').InAnyOrder('2')
            dummy.do_step(2, '2').InAnyOrder('2')
            dummy.do_step(3, '1').InAnyOrder('3')
            dummy.do_step(3, '2').InAnyOrder('3')

    def test_single_fwd(self):
        # 'second' requires 'first': first must fully complete first.
        with self._dep_test(('second', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'second').AndReturn(None)
            dummy.do_step(2, 'second').AndReturn(None)
            dummy.do_step(3, 'second').AndReturn(None)

    def test_chain_fwd(self):
        with self._dep_test(('third', 'second'),
                            ('second', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'second').AndReturn(None)
            dummy.do_step(2, 'second').AndReturn(None)
            dummy.do_step(3, 'second').AndReturn(None)
            dummy.do_step(1, 'third').AndReturn(None)
            dummy.do_step(2, 'third').AndReturn(None)
            dummy.do_step(3, 'third').AndReturn(None)

    def test_diamond_fwd(self):
        with self._dep_test(('last', 'mid1'), ('last', 'mid2'),
                            ('mid1', 'first'), ('mid2', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'mid1').InAnyOrder('1')
            dummy.do_step(1, 'mid2').InAnyOrder('1')
            dummy.do_step(2, 'mid1').InAnyOrder('2')
            dummy.do_step(2, 'mid2').InAnyOrder('2')
            dummy.do_step(3, 'mid1').InAnyOrder('3')
            dummy.do_step(3, 'mid2').InAnyOrder('3')
            dummy.do_step(1, 'last').AndReturn(None)
            dummy.do_step(2, 'last').AndReturn(None)
            dummy.do_step(3, 'last').AndReturn(None)

    def test_complex_fwd(self):
        with self._dep_test(('last', 'mid1'), ('last', 'mid2'),
                            ('mid1', 'mid3'), ('mid1', 'first'),
                            ('mid3', 'first'), ('mid2', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'mid2').InAnyOrder('1')
            dummy.do_step(1, 'mid3').InAnyOrder('1')
            dummy.do_step(2, 'mid2').InAnyOrder('2')
            dummy.do_step(2, 'mid3').InAnyOrder('2')
            dummy.do_step(3, 'mid2').InAnyOrder('3')
            dummy.do_step(3, 'mid3').InAnyOrder('3')
            dummy.do_step(1, 'mid1').AndReturn(None)
            dummy.do_step(2, 'mid1').AndReturn(None)
            dummy.do_step(3, 'mid1').AndReturn(None)
            dummy.do_step(1, 'last').AndReturn(None)
            dummy.do_step(2, 'last').AndReturn(None)
            dummy.do_step(3, 'last').AndReturn(None)

    def test_many_edges_fwd(self):
        with self._dep_test(('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
                            ('mid1', 'e2'), ('mid1', 'mid3'),
                            ('mid2', 'mid3'),
                            ('mid3', 'e3')) as dummy:
            dummy.do_step(1, 'e1').InAnyOrder('1edges')
            dummy.do_step(1, 'e2').InAnyOrder('1edges')
            dummy.do_step(1, 'e3').InAnyOrder('1edges')
            dummy.do_step(2, 'e1').InAnyOrder('2edges')
            dummy.do_step(2, 'e2').InAnyOrder('2edges')
            dummy.do_step(2, 'e3').InAnyOrder('2edges')
            dummy.do_step(3, 'e1').InAnyOrder('3edges')
            dummy.do_step(3, 'e2').InAnyOrder('3edges')
            dummy.do_step(3, 'e3').InAnyOrder('3edges')
            dummy.do_step(1, 'mid3').AndReturn(None)
            dummy.do_step(2, 'mid3').AndReturn(None)
            dummy.do_step(3, 'mid3').AndReturn(None)
            dummy.do_step(1, 'mid2').InAnyOrder('1mid')
            dummy.do_step(1, 'mid1').InAnyOrder('1mid')
            dummy.do_step(2, 'mid2').InAnyOrder('2mid')
            dummy.do_step(2, 'mid1').InAnyOrder('2mid')
            dummy.do_step(3, 'mid2').InAnyOrder('3mid')
            dummy.do_step(3, 'mid1').InAnyOrder('3mid')
            dummy.do_step(1, 'last').AndReturn(None)
            dummy.do_step(2, 'last').AndReturn(None)
            dummy.do_step(3, 'last').AndReturn(None)

    def test_dbldiamond_fwd(self):
        with self._dep_test(('last', 'a1'), ('last', 'a2'),
                            ('a1', 'b1'), ('a2', 'b1'), ('a2', 'b2'),
                            ('b1', 'first'), ('b2', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'b1').InAnyOrder('1b')
            dummy.do_step(1, 'b2').InAnyOrder('1b')
            dummy.do_step(2, 'b1').InAnyOrder('2b')
            dummy.do_step(2, 'b2').InAnyOrder('2b')
            dummy.do_step(3, 'b1').InAnyOrder('3b')
            dummy.do_step(3, 'b2').InAnyOrder('3b')
            dummy.do_step(1, 'a1').InAnyOrder('1a')
            dummy.do_step(1, 'a2').InAnyOrder('1a')
            dummy.do_step(2, 'a1').InAnyOrder('2a')
            dummy.do_step(2, 'a2').InAnyOrder('2a')
            dummy.do_step(3, 'a1').InAnyOrder('3a')
            dummy.do_step(3, 'a2').InAnyOrder('3a')
            dummy.do_step(1, 'last').AndReturn(None)
            dummy.do_step(2, 'last').AndReturn(None)
            dummy.do_step(3, 'last').AndReturn(None)

    def test_circular_deps(self):
        # A cycle must be rejected at group-construction time.
        d = dependencies.Dependencies([('first', 'second'),
                                       ('second', 'third'),
                                       ('third', 'first')])
        self.assertRaises(dependencies.CircularDependencyException,
                          scheduler.DependencyTaskGroup, d)

    def test_aggregate_exceptions_raises_all_at_the_end(self):
        def run_tasks_with_exceptions(e1=None, e2=None):
            self.aggregate_exceptions = True
            tasks = (('A', None), ('B', None), ('C', None))
            with self._dep_test(*tasks) as dummy:
                dummy.do_step(1, 'A').InAnyOrder('1')
                dummy.do_step(1, 'B').InAnyOrder('1')
                dummy.do_step(1, 'C').InAnyOrder('1').AndRaise(e1)
                dummy.do_step(2, 'A').InAnyOrder('2')
                dummy.do_step(2, 'B').InAnyOrder('2').AndRaise(e2)
                dummy.do_step(3, 'A').InAnyOrder('3')
        e1 = Exception('e1')
        e2 = Exception('e2')
        # Both failures are collected into one ExceptionGroup.
        exc = self.assertRaises(scheduler.ExceptionGroup,
                                run_tasks_with_exceptions, e1, e2)
        self.assertEqual(set([e1, e2]), set(exc.exceptions))

    def test_aggregate_exceptions_cancels_dependent_tasks_recursively(self):
        def run_tasks_with_exceptions(e1=None, e2=None):
            self.aggregate_exceptions = True
            tasks = (('A', None), ('B', 'A'), ('C', 'B'))
            with self._dep_test(*tasks) as dummy:
                # A fails immediately, so B and C never run at all.
                dummy.do_step(1, 'A').AndRaise(e1)
        e1 = Exception('e1')
        exc = self.assertRaises(scheduler.ExceptionGroup,
                                run_tasks_with_exceptions, e1)
        self.assertEqual([e1], exc.exceptions)

    def test_aggregate_exceptions_cancels_tasks_in_reverse_order(self):
        def run_tasks_with_exceptions(e1=None, e2=None):
            self.reverse_order = True
            self.aggregate_exceptions = True
            tasks = (('A', None), ('B', 'A'), ('C', 'B'))
            with self._dep_test(*tasks) as dummy:
                # Reversed: C runs first and fails; A and B are cancelled.
                dummy.do_step(1, 'C').AndRaise(e1)
        e1 = Exception('e1')
        exc = self.assertRaises(scheduler.ExceptionGroup,
                                run_tasks_with_exceptions, e1)
        self.assertEqual([e1], exc.exceptions)

    def test_exception_grace_period(self):
        e1 = Exception('e1')

        def run_tasks_with_exceptions():
            # With a grace period, running siblings finish before the
            # original exception is re-raised.
            self.error_wait_time = 5
            tasks = (('A', None), ('B', None), ('C', 'A'))
            with self._dep_test(*tasks) as dummy:
                dummy.do_step(1, 'A').InAnyOrder('1')
                dummy.do_step(1, 'B').InAnyOrder('1')
                dummy.do_step(2, 'A').InAnyOrder('2').AndRaise(e1)
                dummy.do_step(2, 'B').InAnyOrder('2')
                dummy.do_step(3, 'B')
        exc = self.assertRaises(type(e1), run_tasks_with_exceptions)
        self.assertEqual(e1, exc)

    def test_exception_grace_period_expired(self):
        e1 = Exception('e1')

        def run_tasks_with_exceptions():
            self.steps = 5
            self.error_wait_time = 0.05

            def sleep():
                # Burn real time so the grace period elapses mid-run.
                eventlet.sleep(self.error_wait_time)
            tasks = (('A', None), ('B', None), ('C', 'A'))
            with self._dep_test(*tasks) as dummy:
                dummy.do_step(1, 'A').InAnyOrder('1')
                dummy.do_step(1, 'B').InAnyOrder('1')
                dummy.do_step(2, 'A').InAnyOrder('2').AndRaise(e1)
                dummy.do_step(2, 'B').InAnyOrder('2')
                dummy.do_step(3, 'B')
                # B is cut off after the grace period expires (no step 5).
                dummy.do_step(4, 'B').WithSideEffects(sleep)
        exc = self.assertRaises(type(e1), run_tasks_with_exceptions)
        self.assertEqual(e1, exc)
class TaskTest(common.HeatTestCase):
    """TaskRunner behaviour: stepping, sleeping, timeouts and cancellation.

    Uses mox (``self.m``) to record exact ``do_step``/``_sleep``/
    ``wallclock`` call sequences; ``VerifyAll`` runs on cleanup.
    """

    def setUp(self):
        super(TaskTest, self).setUp()
        # Real sleeps are stubbed per-test; enable the sleep path itself.
        scheduler.ENABLE_SLEEP = True
        self.addCleanup(self.m.VerifyAll)

    def test_run(self):
        # Default wait: sleep(0) once up front, then sleep(1) per step.
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(0).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(3).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(task)()

    def test_run_delays(self):
        # A yielded delay of 2 produces an extra sleep per step.
        task = DummyTask(delays=itertools.repeat(2))
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(0).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(3).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(task)()

    def test_run_delays_dynamic(self):
        # Per-step delays [2, 4, 1] map to that many sleeps per step.
        task = DummyTask(delays=[2, 4, 1])
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(0).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(3).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(task)()

    def test_run_wait_time(self):
        # An explicit wait_time is used for the per-step sleeps.
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(0).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(42).AndReturn(None)
        task.do_step(3).AndReturn(None)
        scheduler.TaskRunner._sleep(42).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(task)(wait_time=42)

    def test_start_run(self):
        # start() does the first step; run_to_completion() does the rest
        # (no leading sleep(0), no trailing sleep).
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(3).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        runner.run_to_completion()

    def test_start_run_wait_time(self):
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(24).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(24).AndReturn(None)
        task.do_step(3).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        runner.run_to_completion(wait_time=24)

    def test_sleep(self):
        # _sleep delegates to eventlet.sleep with the requested wait.
        sleep_time = 42
        self.m.StubOutWithMock(eventlet, 'sleep')
        eventlet.sleep(0).AndReturn(None)
        eventlet.sleep(sleep_time).MultipleTimes().AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(DummyTask())
        runner(wait_time=sleep_time)

    def test_sleep_zero(self):
        self.m.StubOutWithMock(eventlet, 'sleep')
        eventlet.sleep(0).MultipleTimes().AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(DummyTask())
        runner(wait_time=0)

    def test_sleep_none(self):
        # wait_time=None means no eventlet.sleep calls at all.
        self.m.StubOutWithMock(eventlet, 'sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(DummyTask())
        runner(wait_time=None)

    def test_args(self):
        # Positional and keyword args given to the runner reach the task.
        args = ['foo', 'bar']
        kwargs = {'baz': 'quux', 'blarg': 'wibble'}
        self.m.StubOutWithMock(DummyTask, '__call__')
        task = DummyTask()
        task(*args, **kwargs)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task, *args, **kwargs)
        runner(wait_time=None)

    def test_non_callable(self):
        self.assertRaises(AssertionError, scheduler.TaskRunner, object())

    def test_stepping(self):
        # step() returns False while running, True on completion;
        # bool(runner) reports whether work remains.
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        task.do_step(3).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        self.assertFalse(runner.step())
        self.assertTrue(runner)
        self.assertFalse(runner.step())
        self.assertTrue(runner.step())
        self.assertFalse(runner)

    def test_start_no_steps(self):
        # A zero-step task is done immediately after start().
        task = DummyTask(0)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        self.assertTrue(runner.done())
        self.assertTrue(runner.step())

    def test_start_only(self):
        # start() performs exactly the first step.
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start()
        self.assertTrue(runner.started())

    def test_double_start(self):
        runner = scheduler.TaskRunner(DummyTask())
        runner.start()
        self.assertRaises(AssertionError, runner.start)

    def test_start_cancelled(self):
        # A cancelled runner cannot be (re)started.
        runner = scheduler.TaskRunner(DummyTask())
        runner.cancel()
        self.assertRaises(AssertionError, runner.start)

    def test_call_double_start(self):
        runner = scheduler.TaskRunner(DummyTask())
        runner(wait_time=None)
        self.assertRaises(AssertionError, runner.start)

    def test_start_function(self):
        # A plain (non-generator) callable completes in start() itself.
        def task():
            pass
        runner = scheduler.TaskRunner(task)
        runner.start()
        self.assertTrue(runner.started())
        self.assertTrue(runner.done())
        self.assertTrue(runner.step())

    def test_repeated_done(self):
        # step() on a finished runner keeps returning True.
        task = DummyTask(0)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        self.assertTrue(runner.step())
        self.assertTrue(runner.step())

    def test_timeout(self):
        # Wallclock passing the deadline makes step() raise Timeout.
        st = timeutils.wallclock()

        def task():
            while True:
                yield
        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.5)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start(timeout=1)
        self.assertTrue(runner)
        self.assertRaises(scheduler.Timeout, runner.step)

    def test_timeout_return(self):
        # A task may catch Timeout and exit cleanly; step() then succeeds.
        st = timeutils.wallclock()

        def task():
            while True:
                try:
                    yield
                except scheduler.Timeout:
                    return
        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.5)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start(timeout=1)
        self.assertTrue(runner)
        self.assertTrue(runner.step())
        self.assertFalse(runner)

    def test_timeout_swallowed(self):
        # If the task swallows the Timeout and yields again, the runner
        # still treats it as finished and never resumes the generator.
        st = timeutils.wallclock()

        def task():
            while True:
                try:
                    yield
                except scheduler.Timeout:
                    yield
                    self.fail('Task still running')
        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.5)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start(timeout=1)
        self.assertTrue(runner)
        self.assertTrue(runner.step())
        self.assertFalse(runner)
        self.assertTrue(runner.step())

    def test_cancel_not_started(self):
        # Cancelling before start() marks the runner done immediately.
        task = DummyTask(1)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.cancel()
        self.assertTrue(runner.done())

    def test_cancel_done(self):
        # Cancelling an already-finished runner is a harmless no-op.
        task = DummyTask(1)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start()
        self.assertTrue(runner.started())
        self.assertTrue(runner.step())
        self.assertTrue(runner.done())
        runner.cancel()
        self.assertTrue(runner.done())
        self.assertTrue(runner.step())

    def test_cancel(self):
        # Without a grace period, cancel stops the task at the next step.
        task = DummyTask(3)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start()
        self.assertTrue(runner.started())
        self.assertFalse(runner.step())
        runner.cancel()
        self.assertTrue(runner.step())

    def test_cancel_grace_period(self):
        # With a grace period the task keeps stepping until it expires.
        st = timeutils.wallclock()
        task = DummyTask(5)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.StubOutWithMock(timeutils, 'wallclock')
        task.do_step(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.5)
        task.do_step(3).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.0)
        task.do_step(4).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start()
        self.assertTrue(runner.started())
        self.assertFalse(runner.step())
        runner.cancel(grace_period=1.0)
        self.assertFalse(runner.step())
        self.assertFalse(runner.step())
        self.assertTrue(runner.step())

    def test_cancel_grace_period_before_timeout(self):
        # Grace period expiring before the overall timeout ends the task.
        st = timeutils.wallclock()
        task = DummyTask(5)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.1)
        task.do_step(1).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 0.2)
        task.do_step(2).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 0.2)
        timeutils.wallclock().AndReturn(st + 0.5)
        task.do_step(3).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.0)
        task.do_step(4).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start(timeout=10)
        self.assertTrue(runner.started())
        self.assertFalse(runner.step())
        runner.cancel(grace_period=1.0)
        self.assertFalse(runner.step())
        self.assertFalse(runner.step())
        self.assertTrue(runner.step())

    def test_cancel_grace_period_after_timeout(self):
        # If the timeout fires before the grace period ends, Timeout wins.
        st = timeutils.wallclock()
        task = DummyTask(5)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.1)
        task.do_step(1).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 0.2)
        task.do_step(2).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 0.2)
        timeutils.wallclock().AndReturn(st + 0.5)
        task.do_step(3).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.0)
        task.do_step(4).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start(timeout=1.25)
        self.assertTrue(runner.started())
        self.assertFalse(runner.step())
        runner.cancel(grace_period=3)
        self.assertFalse(runner.step())
        self.assertFalse(runner.step())
        self.assertRaises(scheduler.Timeout, runner.step)

    def test_cancel_grace_period_not_started(self):
        # Grace period is irrelevant when the runner never started.
        task = DummyTask(1)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.cancel(grace_period=0.5)
        self.assertTrue(runner.done())
class TimeoutTest(common.HeatTestCase):
    """Ordering and equality semantics of scheduler.Timeout objects."""

    def test_compare(self):
        runner = scheduler.TaskRunner(DummyTask())
        first = scheduler.Timeout(runner, 10)
        # Ensure the two timeouts get measurably different deadlines.
        eventlet.sleep(0.01)
        second = scheduler.Timeout(runner, 10)
        self.assertTrue(first < second)
        self.assertTrue(second > first)
        self.assertEqual(first, first)
        self.assertNotEqual(first, second)
class DescriptionTest(common.HeatTestCase):
    """scheduler.task_description renders a human-readable task name.

    The nested classes' ``__str__``/``__repr__`` return values are the
    fixture data these tests assert against; they are intentionally odd.
    """

    def setUp(self):
        super(DescriptionTest, self).setUp()
        self.addCleanup(self.m.VerifyAll)

    def test_func(self):
        # Plain functions are described by their name.
        def f():
            pass
        self.assertEqual('f', scheduler.task_description(f))

    def test_lambda(self):
        l = lambda: None
        self.assertEqual('<lambda>', scheduler.task_description(l))

    def test_method(self):
        # Bound methods include the owner via str(instance).
        class C(object):
            def __str__(self):
                return 'C "o"'

            def __repr__(self):
                return 'o'

            def m(self):
                pass
        self.assertEqual('m from C "o"', scheduler.task_description(C().m))

    def test_object(self):
        # Callable objects are described by repr(instance).
        class C(object):
            def __str__(self):
                return 'C "o"'

            def __repr__(self):
                return 'o'

            def __call__(self):
                pass
        self.assertEqual('o', scheduler.task_description(C()))

    def test_unicode(self):
        # Non-ASCII names survive on both Python 2 and 3 via the
        # repr_wrapper/python_2_unicode_compatible shims.
        @repr_wrapper
        @six.python_2_unicode_compatible
        class C(object):
            def __str__(self):
                return u'C "\u2665"'

            def __repr__(self):
                return u'\u2665'

            def __call__(self):
                pass

            def m(self):
                pass
        self.assertEqual(u'm from C "\u2665"',
                         scheduler.task_description(C().m))
        self.assertEqual(u'\u2665',
                         scheduler.task_description(C()))
class WrapperTaskTest(common.HeatTestCase):
    """Tests for the @scheduler.wrappertask decorator.

    A wrapped generator may yield child task generators; the wrapper
    drives each child to completion, and exceptions, throw() and
    close() are routed between parent and child as these tests pin
    down.
    """

    def setUp(self):
        super(WrapperTaskTest, self).setUp()
        self.addCleanup(self.m.VerifyAll)

    def test_wrap(self):
        # Each yielded child task is stepped to completion in turn.
        child_tasks = [DummyTask() for i in range(3)]

        @scheduler.wrappertask
        def task():
            for child_task in child_tasks:
                yield child_task()
            yield

        for child_task in child_tasks:
            self.m.StubOutWithMock(child_task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')

        scheduler.TaskRunner._sleep(0).AndReturn(None)
        for child_task in child_tasks:
            child_task.do_step(1).AndReturn(None)
            scheduler.TaskRunner._sleep(1).AndReturn(None)
            child_task.do_step(2).AndReturn(None)
            scheduler.TaskRunner._sleep(1).AndReturn(None)
            child_task.do_step(3).AndReturn(None)
            scheduler.TaskRunner._sleep(1).AndReturn(None)

        self.m.ReplayAll()

        scheduler.TaskRunner(task)()

    def test_parent_yield_value(self):
        # Plain values yielded by the parent pass straight through.
        @scheduler.wrappertask
        def parent_task():
            yield
            yield 3
            yield iter([1, 2, 4])

        task = parent_task()

        self.assertIsNone(next(task))
        self.assertEqual(3, next(task))
        self.assertEqual([1, 2, 4], list(next(task)))

    def test_child_yield_value(self):
        # Values yielded by a child task surface from the parent.
        def child_task():
            yield
            yield 3
            yield iter([1, 2, 4])

        @scheduler.wrappertask
        def parent_task():
            yield child_task()

        task = parent_task()

        self.assertIsNone(next(task))
        self.assertEqual(3, next(task))
        self.assertEqual([1, 2, 4], list(next(task)))

    def test_child_exception(self):
        # An exception raised in a child propagates into the parent at
        # the yield point.
        class MyException(Exception):
            pass

        def child_task():
            yield
            raise MyException()

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                raise
            else:
                self.fail('No exception raised in parent_task')

        task = parent_task()
        next(task)
        self.assertRaises(MyException, next, task)

    def test_child_exception_exit(self):
        # The parent may return normally after catching a child error.
        class MyException(Exception):
            pass

        def child_task():
            yield
            raise MyException()

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                return
            else:
                self.fail('No exception raised in parent_task')

        task = parent_task()
        next(task)
        self.assertRaises(StopIteration, next, task)

    def test_child_exception_swallow(self):
        # The parent may swallow a child error and keep yielding.
        class MyException(Exception):
            pass

        def child_task():
            yield
            raise MyException()

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                yield
            else:
                self.fail('No exception raised in parent_task')
            yield

        task = parent_task()
        next(task)
        next(task)

    def test_child_exception_swallow_next(self):
        # After swallowing a child error, the parent can delegate to a
        # further child task.
        class MyException(Exception):
            pass

        def child_task():
            yield
            raise MyException()

        dummy = DummyTask()

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                pass
            else:
                self.fail('No exception raised in parent_task')
            yield dummy()

        task = parent_task()
        next(task)

        self.m.StubOutWithMock(dummy, 'do_step')
        for i in range(1, dummy.num_steps + 1):
            dummy.do_step(i).AndReturn(None)
        self.m.ReplayAll()

        for i in range(1, dummy.num_steps + 1):
            next(task)
        self.assertRaises(StopIteration, next, task)

    def test_thrown_exception_swallow_next(self):
        # throw() into the task is delivered to the running child,
        # which may handle it and delegate to another task.
        class MyException(Exception):
            pass

        dummy = DummyTask()

        @scheduler.wrappertask
        def child_task():
            try:
                yield
            except MyException:
                yield dummy()
            else:
                self.fail('No exception raised in child_task')

        @scheduler.wrappertask
        def parent_task():
            yield child_task()

        task = parent_task()

        self.m.StubOutWithMock(dummy, 'do_step')
        for i in range(1, dummy.num_steps + 1):
            dummy.do_step(i).AndReturn(None)
        self.m.ReplayAll()

        next(task)
        # throw() performs the first step of dummy; remaining steps via
        # next().
        task.throw(MyException)
        for i in range(2, dummy.num_steps + 1):
            next(task)
        self.assertRaises(StopIteration, next, task)

    def test_thrown_exception_raise(self):
        # A child may re-raise a thrown exception for the parent to
        # handle.
        class MyException(Exception):
            pass

        dummy = DummyTask()

        @scheduler.wrappertask
        def child_task():
            try:
                yield
            except MyException:
                raise
            else:
                self.fail('No exception raised in child_task')

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                yield dummy()

        task = parent_task()

        self.m.StubOutWithMock(dummy, 'do_step')
        for i in range(1, dummy.num_steps + 1):
            dummy.do_step(i).AndReturn(None)
        self.m.ReplayAll()

        next(task)
        task.throw(MyException)
        for i in range(2, dummy.num_steps + 1):
            next(task)
        self.assertRaises(StopIteration, next, task)

    def test_thrown_exception_exit(self):
        # A child may exit cleanly on a thrown exception; the parent
        # then continues with its next subtask.
        class MyException(Exception):
            pass

        dummy = DummyTask()

        @scheduler.wrappertask
        def child_task():
            try:
                yield
            except MyException:
                return
            else:
                self.fail('No exception raised in child_task')

        @scheduler.wrappertask
        def parent_task():
            yield child_task()
            yield dummy()

        task = parent_task()

        self.m.StubOutWithMock(dummy, 'do_step')
        for i in range(1, dummy.num_steps + 1):
            dummy.do_step(i).AndReturn(None)
        self.m.ReplayAll()

        next(task)
        task.throw(MyException)
        for i in range(2, dummy.num_steps + 1):
            next(task)
        self.assertRaises(StopIteration, next, task)

    def test_parent_exception(self):
        # Exceptions raised by the parent itself propagate to the
        # caller.
        class MyException(Exception):
            pass

        def child_task():
            yield

        @scheduler.wrappertask
        def parent_task():
            yield child_task()
            raise MyException()

        task = parent_task()
        next(task)
        self.assertRaises(MyException, next, task)

    def test_parent_throw(self):
        # An exception thrown into the task can be caught and re-raised
        # by the parent around its yield.
        class MyException(Exception):
            pass

        @scheduler.wrappertask
        def parent_task():
            try:
                yield DummyTask()()
            except MyException:
                raise
            else:
                self.fail('No exception raised in parent_task')

        task = parent_task()
        next(task)
        self.assertRaises(MyException, task.throw, MyException())

    def test_parent_throw_exit(self):
        # The parent may finish cleanly after catching a thrown
        # exception.
        class MyException(Exception):
            pass

        @scheduler.wrappertask
        def parent_task():
            try:
                yield DummyTask()()
            except MyException:
                return
            else:
                self.fail('No exception raised in parent_task')

        task = parent_task()
        next(task)
        self.assertRaises(StopIteration, task.throw, MyException())

    def test_parent_cancel(self):
        # close() raises GeneratorExit inside the parent, which may
        # re-raise it.
        @scheduler.wrappertask
        def parent_task():
            try:
                yield
            except GeneratorExit:
                raise
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()

    def test_parent_cancel_exit(self):
        # The parent may exit normally when closed.
        @scheduler.wrappertask
        def parent_task():
            try:
                yield
            except GeneratorExit:
                return
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()

    def test_cancel(self):
        # close() propagates GeneratorExit through the running child
        # and then the parent.
        def child_task():
            try:
                yield
            except GeneratorExit:
                raise
            else:
                self.fail('child_task not closed')

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except GeneratorExit:
                raise
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()

    def test_cancel_exit(self):
        # A child may exit normally on close(); the parent still sees
        # GeneratorExit.
        def child_task():
            try:
                yield
            except GeneratorExit:
                return
            else:
                self.fail('child_task not closed')

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except GeneratorExit:
                raise
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()

    def test_cancel_parent_exit(self):
        # Both child and parent may exit normally on close().
        def child_task():
            try:
                yield
            except GeneratorExit:
                return
            else:
                self.fail('child_task not closed')

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except GeneratorExit:
                return
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()
| |
"""Execute main parser."""
import time
from datetime import datetime as date
import os
from os.path import sep
import imp
from . import supplement, tree, qfunctions, modify, setpaths
from .__init__ import __version__
def execute_parser(args):
    """Initiate the MATLAB-to-C++ interpretation and conversion process.

    Builds a syntax tree from ``args.filename`` (plus any files it
    references that can be found on the search paths), applies optional
    per-file supplement configuration (``<name>.py`` files), runs the
    AST transforms and translation, and writes ``.cpp``/``.hpp``/
    ``.py``/``.log`` output files into the current working directory.

    Args:
        args (argparse.Namespace): arguments parsed through m2cpp.

    Raises:
        IOError: if ``args.paths_file`` is given but does not exist.
        ImportError: if a supplement ``.py`` file cannot be loaded.
    """
    builder = tree.builder.Builder(
        disp=args.disp,
        comments=args.comments,
        original=args.original,
        enable_omp=args.enable_omp,
        enable_tbb=args.enable_tbb,
        reference=args.reference,
    )

    # Read the setpath.m-style file and return a string list of paths.
    paths_from_file = []
    if args.paths_file:
        if os.path.isfile(args.paths_file):
            paths_from_file = setpaths.multiple_folder_paths(args.paths_file)
        else:
            raise IOError("File '" + args.paths_file + "' not found")

    if os.path.isfile(args.filename):
        # Search paths: the input file's directory plus user-provided ones.
        paths = [os.path.abspath(os.path.dirname(args.filename))]
        paths += paths_from_file

        if args.disp:
            print("building tree...")

        filenames = [os.path.abspath(args.filename)]
        stack = []  # files already loaded, to avoid reprocessing

        while filenames:
            filename = filenames.pop(0)
            assert os.path.isfile(filename)
            if filename in stack:
                continue

            if args.disp:
                print("loading", filename)
            stack.append(filename)

            # Use a context manager so the handle is closed even on
            # error.  Mode "rU" is deprecated (removed in Python 3.11);
            # universal newlines are the default in text mode.
            with open(filename, "r") as f:
                code = f.read()

            # Supplement files are looked up next to the current working
            # directory, matching where the outputs are written below.
            local_name = os.getcwd() + sep + os.path.basename(filename)

            if os.path.isfile(local_name + ".py") and not args.reset:
                try:
                    cfg = imp.load_source("cfg", local_name + ".py")
                except Exception:
                    # A bare 'except:' would also swallow SystemExit and
                    # KeyboardInterrupt; only conversion errors belong
                    # here.
                    raise ImportError("""Supplement file:
%s.py
is formated incorrectly. Change the format or convert with '-r' option to create
a new file.""" % local_name)

                # Apply verbatim text substitutions before parsing.
                if "verbatims" in cfg.__dict__ and cfg.verbatims:
                    verbatims = cfg.verbatims
                    code = supplement.verbatim.set(verbatims, code)

                builder.load(filename, code)
                program = builder[-1]

                # Override inferred function/struct types and extend the
                # include list with values from the supplement file.
                if "functions" in cfg.__dict__:
                    funcs = program.ftypes
                    for name in funcs.keys():
                        if name in cfg.functions:
                            for key in cfg.functions[name].keys():
                                funcs[name][key] = cfg.functions[name][key]
                    program.ftypes = funcs

                if "structs" in cfg.__dict__:
                    structs = program.stypes
                    for name in structs.keys():
                        if name in cfg.structs:
                            for key in cfg.structs[name].keys():
                                structs[name][key] = cfg.structs[name][key]
                    program.stypes = structs

                if "includes" in cfg.__dict__:
                    includes = program.itypes
                    for key in cfg.includes:
                        if key not in includes:
                            includes.append(key)
                    includes = [i for i in includes
                                if supplement.includes.write_to_includes(i)]
                    program.itypes = includes
            else:
                builder.load(filename, code)
                program = builder[-1]

            # Queue unknown identifiers for loading if they exist as
            # files on any search path (iterate backwards since entries
            # may be renamed in place).
            unknowns = builder.get_unknowns(filename)
            for i in range(len(unknowns) - 1, -1, -1):
                for path in paths:
                    if os.path.isfile(path + sep + unknowns[i] + ".m"):
                        unknowns[i] = unknowns[i] + ".m"
                    if os.path.isfile(path + sep + unknowns[i]):
                        program.include(path + sep + unknowns[i])
                        filenames.append(path + sep + unknowns[i])
    else:
        # args.filename is not a file: treat it as raw code.
        builder.load("unnamed", args.filename)
        program = builder[-1]

    # (work in progress) data types suggested by a live MATLAB session.
    if args.matlab_suggest:
        from . import matlab_types
        builder = matlab_types.mtypes(builder, args)

    if args.disp:
        print("configure tree")
    builder.configure(suggest=(2 * args.suggest or args.matlab_suggest))

    # Modify the Abstract Syntax Tree (AST) before translation.
    builder.project = modify.preorder_transform_AST(
        builder.project, args.nargin,
        suggest=(2 * args.suggest or args.matlab_suggest),
    )

    if args.disp:
        print(builder.project.summary())
        print("generate translation")
    builder.project.translate(args)

    # Post-order modification after translation.
    builder.project = modify.postorder_transform_AST(builder.project)

    t = time.time()
    stamp = date.fromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')

    for program in builder.project:
        # Outputs go to the current working directory, mirroring the
        # supplement-file lookup above.
        name = os.getcwd() + sep + os.path.basename(program.name)

        cpp = qfunctions.qcpp(program)
        hpp = qfunctions.qhpp(program)
        py = qfunctions.qpy(program, prefix=True)
        log = qfunctions.qlog(program)

        if args.disp:
            print("Writing files...")

        if args.reset:
            for ext in [".cpp", ".hpp", ".log", ".py"]:
                if os.path.isfile(name + ext):
                    os.remove(name + ext)

        if cpp:
            cpp = """// Automatically translated using m2cpp %s on %s
%s""" % (__version__, stamp, cpp)
            with open(name + ".cpp", "w") as f:
                f.write(cpp)

        if hpp:
            hpp = """// Automatically translated using m2cpp %s on %s
%s""" % (__version__, stamp, hpp)
            with open(name + ".hpp", "w") as f:
                f.write(hpp)

        if log:
            log = "Automatically translated using m2cpp %s on %s\n\n%s"\
                % (__version__, stamp, log)
            with open(name + ".log", "w") as f:
                f.write(log)

        if py:
            py = """# Automatically translated using m2cpp %s on %s
#
%s""" % (__version__, stamp, py)
            with open(name + ".py", "w") as f:
                f.write(py)
            # Drop stale bytecode for the regenerated supplement file.
            if os.path.isfile(name + ".pyc"):
                os.remove(name + ".pyc")

    # Optional diagnostic output of the (first) program's tree.
    program = builder[0]
    if args.tree_full:
        print(program.summary(args))
    elif args.tree:
        if program[1][0].cls == "Main":
            print(program[1][0][3].summary(args))
        else:
            print(program[1].summary(args))
    elif args.line:
        nodes = program[1].flatten(False, False, False)
        for node_ in nodes:
            if node_.line == args.line and node_.cls != "Block":
                print(node_.str.replace("__percent__", "%"))
                break
    else:
        print(program[1].str.replace("__percent__", "%"))
| |
# sql/ddl.py
# Copyright (C) 2009-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provides the hierarchy of DDL-defining schema items as well as routines
to invoke them for a create/drop call.
"""
from .. import util
from .elements import ClauseElement
from .base import Executable, _generative, SchemaVisitor, _bind_or_error
from ..util import topological
from .. import event
from .. import exc
class _DDLCompiles(ClauseElement):
    def _compiler(self, dialect, **kw):
        """Return a DDL compiler for this ClauseElement, produced by the
        given Dialect."""
        compiler_factory = dialect.ddl_compiler
        return compiler_factory(dialect, self, **kw)
class DDLElement(Executable, _DDLCompiles):
    """Base class for DDL expression constructs.

    This class is the base for the general purpose :class:`.DDL` class,
    as well as the various create/drop clause constructs such as
    :class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`,
    etc.

    :class:`.DDLElement` integrates closely with SQLAlchemy events,
    introduced in :ref:`event_toplevel`.  An instance of one is
    itself an event receiving callable::

        event.listen(
            users,
            'after_create',
            AddConstraint(constraint).execute_if(dialect='postgresql')
        )

    .. seealso::

        :class:`.DDL`

        :class:`.DDLEvents`

        :ref:`event_toplevel`

        :ref:`schema_ddl_sequences`

    """

    # DDL statements autocommit by default when executed standalone.
    _execution_options = Executable.\
        _execution_options.union({'autocommit': True})

    target = None     # schema item this DDL is rendered "against"
    on = None         # deprecated filtering criteria (see execute_at)
    dialect = None    # dialect filter set by execute_if()
    callable_ = None  # callable predicate set by execute_if()

    def _execute_on_connection(self, connection, multiparams, params):
        return connection._execute_ddl(self, multiparams, params)

    def execute(self, bind=None, target=None):
        """Execute this DDL immediately.

        Executes the DDL statement in isolation using the supplied
        :class:`.Connectable` or
        :class:`.Connectable` assigned to the ``.bind``
        property, if not supplied. If the DDL has a conditional ``on``
        criteria, it will be invoked with None as the event.

        :param bind:
          Optional, an ``Engine`` or ``Connection``. If not supplied, a valid
          :class:`.Connectable` must be present in the
          ``.bind`` property.

        :param target:
          Optional, defaults to None.  The target SchemaItem for the
          execute call.  Will be passed to the ``on`` callable if any,
          and may also provide string expansion data for the
          statement. See ``execute_at`` for more information.

        """

        if bind is None:
            bind = _bind_or_error(self)

        if self._should_execute(target, bind):
            return bind.execute(self.against(target))
        else:
            # Criteria (dialect/callable/on) rejected this execution.
            bind.engine.logger.info(
                "DDL execution skipped, criteria not met.")

    @util.deprecated("0.7", "See :class:`.DDLEvents`, as well as "
                     ":meth:`.DDLElement.execute_if`.")
    def execute_at(self, event_name, target):
        """Link execution of this DDL to the DDL lifecycle of a SchemaItem.

        Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance,
        executing it when that schema item is created or dropped. The DDL
        statement will be executed using the same Connection and transactional
        context as the Table create/drop itself. The ``.bind`` property of
        this statement is ignored.

        :param event:
          One of the events defined in the schema item's ``.ddl_events``;
          e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop'

        :param target:
          The Table or MetaData instance for which this DDLElement will
          be associated with.

        A DDLElement instance can be linked to any number of schema items.

        ``execute_at`` builds on the ``append_ddl_listener`` interface of
        :class:`.MetaData` and :class:`.Table` objects.

        Caveat: Creating or dropping a Table in isolation will also trigger
        any DDL set to ``execute_at`` that Table's MetaData.  This may change
        in a future release.

        """

        def call_event(target, connection, **kw):
            if self._should_execute_deprecated(event_name,
                                               target, connection, **kw):
                return connection.execute(self.against(target))

        # Map dashed legacy event names onto underscored event hooks.
        event.listen(target, "" + event_name.replace('-', '_'), call_event)

    @_generative
    def against(self, target):
        """Return a copy of this DDL against a specific schema item."""
        # @_generative: mutates a copy produced by _generate(), not self.
        self.target = target

    @_generative
    def execute_if(self, dialect=None, callable_=None, state=None):
        """Return a callable that will execute this
        DDLElement conditionally.

        Used to provide a wrapper for event listening::

            event.listen(
                metadata,
                'before_create',
                DDL("my_ddl").execute_if(dialect='postgresql')
            )

        :param dialect: May be a string, tuple or a callable
          predicate.  If a string, it will be compared to the name of the
          executing database dialect::

            DDL('something').execute_if(dialect='postgresql')

          If a tuple, specifies multiple dialect names::

            DDL('something').execute_if(dialect=('postgresql', 'mysql'))

        :param callable_: A callable, which will be invoked with
          four positional arguments as well as optional keyword
          arguments:

            :ddl:
              This DDL element.

            :target:
              The :class:`.Table` or :class:`.MetaData` object which is the
              target of this event. May be None if the DDL is executed
              explicitly.

            :bind:
              The :class:`.Connection` being used for DDL execution

            :tables:
              Optional keyword argument - a list of Table objects which are to
              be created/ dropped within a MetaData.create_all() or drop_all()
              method call.

            :state:
              Optional keyword argument - will be the ``state`` argument
              passed to this function.

            :checkfirst:
              Keyword argument, will be True if the 'checkfirst' flag was
              set during the call to ``create()``, ``create_all()``,
              ``drop()``, ``drop_all()``.

          If the callable returns a true value, the DDL statement will be
          executed.

        :param state: any value which will be passed to the callable\_
          as the ``state`` keyword argument.

        .. seealso::

            :class:`.DDLEvents`

            :ref:`event_toplevel`

        """
        self.dialect = dialect
        self.callable_ = callable_
        self.state = state

    def _should_execute(self, target, bind, **kw):
        # Legacy 'on' criteria is checked first, then the execute_if()
        # dialect filter (string or collection), then the callable.
        if self.on is not None and \
                not self._should_execute_deprecated(None, target, bind, **kw):
            return False

        if isinstance(self.dialect, util.string_types):
            if self.dialect != bind.engine.name:
                return False
        elif isinstance(self.dialect, (tuple, list, set)):
            if bind.engine.name not in self.dialect:
                return False
        if (self.callable_ is not None and
                not self.callable_(self, target, bind,
                                   state=self.state, **kw)):
            return False

        return True

    def _should_execute_deprecated(self, event, target, bind, **kw):
        # 'on' may be None (always run), a dialect name, a collection of
        # names, or a predicate callable.
        if self.on is None:
            return True
        elif isinstance(self.on, util.string_types):
            return self.on == bind.engine.name
        elif isinstance(self.on, (tuple, list, set)):
            return bind.engine.name in self.on
        else:
            return self.on(self, event, target, bind, **kw)

    def __call__(self, target, bind, **kw):
        """Execute the DDL as a ddl_listener."""

        if self._should_execute(target, bind, **kw):
            return bind.execute(self.against(target))

    def _check_ddl_on(self, on):
        # Validate the deprecated 'on' argument early, at construction.
        if (on is not None and
            (not isinstance(on, util.string_types + (tuple, list, set)) and
             not util.callable(on))):
            raise exc.ArgumentError(
                "Expected the name of a database dialect, a tuple "
                "of names, or a callable for "
                "'on' criteria, got type '%s'." % type(on).__name__)

    def bind(self):
        if self._bind:
            return self._bind

    def _set_bind(self, bind):
        self._bind = bind
    # read/write property built from the two accessors above
    bind = property(bind, _set_bind)

    def _generate(self):
        # Shallow-copy support for the @_generative methods above.
        s = self.__class__.__new__(self.__class__)
        s.__dict__ = self.__dict__.copy()
        return s
class DDL(DDLElement):
    """A literal DDL statement.

    Specifies literal SQL DDL to be executed by the database.  DDL objects
    function as DDL event listeners, and can be subscribed to those events
    listed in :class:`.DDLEvents`, using either :class:`.Table` or
    :class:`.MetaData` objects as targets.  Basic templating support allows
    a single DDL instance to handle repetitive tasks for multiple tables.

    Examples::

        from sqlalchemy import event, DDL

        tbl = Table('users', metadata, Column('uid', Integer))
        event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))

        spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
        event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))

        drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
        connection.execute(drop_spow)

    When operating on Table events, the following ``statement``
    string substitions are available::

        %(table)s    - the Table name, with any required quoting applied
        %(schema)s   - the schema name, with any required quoting applied
        %(fullname)s - the Table name including schema, quoted if needed

    The DDL's "context", if any, will be combined with the standard
    substitutions noted above.  Keys present in the context will override
    the standard substitutions.

    """

    __visit_name__ = "ddl"

    def __init__(self, statement, on=None, context=None, bind=None):
        """Create a DDL statement.

        :param statement:
          A string or unicode string to be executed.  Statements will be
          processed with Python's string formatting operator.  See the
          ``context`` argument and the ``execute_at`` method.

          A literal '%' in a statement must be escaped as '%%'.

          SQL bind parameters are not available in DDL statements.

        :param on:
          .. deprecated:: 0.7
            See :meth:`.DDLElement.execute_if`.

          Optional filtering criteria.  May be a string, tuple or a callable
          predicate.  If a string, it will be compared to the name of the
          executing database dialect::

            DDL('something', on='postgresql')

          If a tuple, specifies multiple dialect names::

            DDL('something', on=('postgresql', 'mysql'))

          If a callable, it will be invoked with four positional arguments
          as well as optional keyword arguments:

            :ddl:
              This DDL element.

            :event:
              The name of the event that has triggered this DDL, such as
              'after-create' Will be None if the DDL is executed explicitly.

            :target:
              The ``Table`` or ``MetaData`` object which is the target of
              this event. May be None if the DDL is executed explicitly.

            :connection:
              The ``Connection`` being used for DDL execution

            :tables:
              Optional keyword argument - a list of Table objects which are to
              be created/ dropped within a MetaData.create_all() or drop_all()
              method call.

          If the callable returns a true value, the DDL statement will be
          executed.

        :param context:
          Optional dictionary, defaults to None.  These values will be
          available for use in string substitutions on the DDL statement.

        :param bind:
          Optional. A :class:`.Connectable`, used by
          default when ``execute()`` is invoked without a bind argument.

        .. seealso::

            :class:`.DDLEvents`

            :ref:`event_toplevel`

        """

        if not isinstance(statement, util.string_types):
            raise exc.ArgumentError(
                "Expected a string or unicode SQL statement, got '%r'" %
                statement)

        self.statement = statement
        self.context = context or {}

        self._check_ddl_on(on)
        self.on = on
        self._bind = bind

    def __repr__(self):
        # Show the statement plus whichever filtering attributes are set.
        return '<%s@%s; %s>' % (
            type(self).__name__, id(self),
            ', '.join([repr(self.statement)] +
                      ['%s=%r' % (key, getattr(self, key))
                       for key in ('on', 'context')
                       if getattr(self, key)]))
class _CreateDropBase(DDLElement):
    """Base class for DDL constructs that represent CREATE and DROP or
    equivalents.

    The common theme of _CreateDropBase is a single
    ``element`` attribute which refers to the element
    to be created or dropped.

    """

    def __init__(self, element, on=None, bind=None):
        # element: the schema item that is the subject of the CREATE/DROP.
        self.element = element
        self._check_ddl_on(on)
        self.on = on
        self.bind = bind

    def _create_rule_disable(self, compiler):
        """Allow disable of _create_rule using a callable.

        Pass to _create_rule using
        util.portable_instancemethod(self._create_rule_disable)
        to retain serializability.

        """
        return False
class CreateSchema(_CreateDropBase):
    """Represent a CREATE SCHEMA statement.

    .. versionadded:: 0.7.4

    The argument here is the string name of the schema.

    """

    __visit_name__ = "create_schema"

    def __init__(self, name, quote=None, **kw):
        """Construct a :class:`.CreateSchema` for the named schema."""
        super(CreateSchema, self).__init__(name, **kw)
        self.quote = quote
class DropSchema(_CreateDropBase):
    """Represent a DROP SCHEMA statement.

    The argument here is the string name of the schema.

    .. versionadded:: 0.7.4

    """

    __visit_name__ = "drop_schema"

    def __init__(self, name, quote=None, cascade=False, **kw):
        """Construct a :class:`.DropSchema` for the named schema."""
        super(DropSchema, self).__init__(name, **kw)
        self.quote = quote
        self.cascade = cascade
class CreateTable(_CreateDropBase):
    """Represent a CREATE TABLE statement."""

    __visit_name__ = "create_table"

    def __init__(
            self, element, on=None, bind=None,
            include_foreign_key_constraints=None):
        """Construct a :class:`.CreateTable` construct.

        :param element: a :class:`.Table` that's the subject
         of the CREATE
        :param on: See the description for 'on' in :class:`.DDL`.
        :param bind: See the description for 'bind' in :class:`.DDL`.
        :param include_foreign_key_constraints: optional sequence of
         :class:`.ForeignKeyConstraint` objects that will be included
         inline within the CREATE construct; if omitted, all foreign key
         constraints that do not specify use_alter=True are included.

         .. versionadded:: 1.0.0

        """
        super(CreateTable, self).__init__(element, on=on, bind=bind)
        # Wrap every column so its rendering can be customized via the
        # CreateColumn compilation hook.
        self.columns = [CreateColumn(col) for col in element.columns]
        self.include_foreign_key_constraints = include_foreign_key_constraints
class _DropView(_CreateDropBase):
    """Semi-public 'DROP VIEW' construct.

    Used by the test suite for dialect-agnostic drops of views.
    This object will eventually be part of a public "view" API.

    """
    __visit_name__ = "drop_view"
class CreateColumn(_DDLCompiles):
    """Represent a :class:`.Column` as rendered in a CREATE TABLE statement,
    via the :class:`.CreateTable` construct.

    This is provided to support custom column DDL within the generation
    of CREATE TABLE statements, by using the
    compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel`
    to extend :class:`.CreateColumn`.

    Typical integration is to examine the incoming :class:`.Column`
    object, and to redirect compilation if a particular flag or condition
    is found::

        from sqlalchemy import schema
        from sqlalchemy.ext.compiler import compiles

        @compiles(schema.CreateColumn)
        def compile(element, compiler, **kw):
            column = element.element

            if "special" not in column.info:
                return compiler.visit_create_column(element, **kw)

            text = "%s SPECIAL DIRECTIVE %s" % (
                column.name,
                compiler.type_compiler.process(column.type)
            )
            default = compiler.get_column_default_string(column)
            if default is not None:
                text += " DEFAULT " + default

            if not column.nullable:
                text += " NOT NULL"

            if column.constraints:
                text += " ".join(
                    compiler.process(const)
                    for const in column.constraints)
            return text

    The above construct can be applied to a :class:`.Table` as follows::

        from sqlalchemy import Table, Metadata, Column, Integer, String
        from sqlalchemy import schema

        metadata = MetaData()

        table = Table('mytable', MetaData(),
                      Column('x', Integer, info={"special": True},
                             primary_key=True),
                      Column('y', String(50)),
                      Column('z', String(20), info={"special": True})
                      )

        metadata.create_all(conn)

    Above, the directives we've added to the :attr:`.Column.info` collection
    will be detected by our custom compilation scheme::

        CREATE TABLE mytable (
                x SPECIAL DIRECTIVE INTEGER NOT NULL,
                y VARCHAR(50),
                z SPECIAL DIRECTIVE VARCHAR(20),
            PRIMARY KEY (x)
        )

    The :class:`.CreateColumn` construct can also be used to skip certain
    columns when producing a ``CREATE TABLE``.  This is accomplished by
    creating a compilation rule that conditionally returns ``None``.
    This is essentially how to produce the same effect as using the
    ``system=True`` argument on :class:`.Column`, which marks a column
    as an implicitly-present "system" column.

    For example, suppose we wish to produce a :class:`.Table` which skips
    rendering of the Postgresql ``xmin`` column against the Postgresql
    backend, but on other backends does render it, in anticipation of a
    triggered rule.  A conditional compilation rule could skip this name only
    on Postgresql::

        from sqlalchemy.schema import CreateColumn

        @compiles(CreateColumn, "postgresql")
        def skip_xmin(element, compiler, **kw):
            if element.element.name == 'xmin':
                return None
            else:
                return compiler.visit_create_column(element, **kw)


        my_table = Table('mytable', metadata,
                         Column('id', Integer, primary_key=True),
                         Column('xmin', Integer)
                         )

    Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE``
    which only includes the ``id`` column in the string; the ``xmin`` column
    will be omitted, but only against the Postgresql backend.

    .. versionadded:: 0.8.3 The :class:`.CreateColumn` construct supports
       skipping of columns by returning ``None`` from a custom compilation
       rule.

    .. versionadded:: 0.8 The :class:`.CreateColumn` construct was added
       to support custom column creation styles.

    """
    __visit_name__ = 'create_column'

    def __init__(self, element):
        # element: the Column to be rendered.
        self.element = element
class DropTable(_CreateDropBase):
    """Represent a DROP TABLE statement.

    The ``element`` is the :class:`.Table` to be dropped.
    """

    __visit_name__ = "drop_table"
class CreateSequence(_CreateDropBase):
    """Represent a CREATE SEQUENCE statement.

    The ``element`` is the :class:`.Sequence` to be created.
    """

    __visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
    """Represent a DROP SEQUENCE statement.

    The ``element`` is the :class:`.Sequence` to be dropped.
    """

    __visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
    """Represent a CREATE INDEX statement.

    The ``element`` is the :class:`.Index` to be created.
    """

    __visit_name__ = "create_index"
class DropIndex(_CreateDropBase):
    """Represent a DROP INDEX statement.

    The ``element`` is the :class:`.Index` to be dropped.
    """

    __visit_name__ = "drop_index"
class AddConstraint(_CreateDropBase):
    """Represent an ALTER TABLE ADD CONSTRAINT statement."""

    __visit_name__ = "add_constraint"

    def __init__(self, element, *args, **kw):
        super(AddConstraint, self).__init__(element, *args, **kw)
        # The constraint is now emitted via explicit ALTER; disable its
        # inline rendering within CREATE TABLE.
        element._create_rule = util.portable_instancemethod(
            self._create_rule_disable)
class DropConstraint(_CreateDropBase):
    """Represent an ALTER TABLE DROP CONSTRAINT statement."""

    __visit_name__ = "drop_constraint"

    def __init__(self, element, cascade=False, **kw):
        # cascade: if True, also drop objects depending on the constraint.
        self.cascade = cascade
        super(DropConstraint, self).__init__(element, **kw)
        # The constraint is managed via explicit ALTER; disable its
        # inline rendering within CREATE TABLE.
        element._create_rule = util.portable_instancemethod(
            self._create_rule_disable)
class DDLBase(SchemaVisitor):
    """Common base for DDL-emitting schema visitors; holds the
    connection that statements are executed on."""

    def __init__(self, connection):
        self.connection = connection
class SchemaGenerator(DDLBase):
    """Visitor that emits CREATE statements for MetaData, Table,
    Sequence and Index objects."""

    def __init__(self, dialect, connection, checkfirst=False,
                 tables=None, **kwargs):
        super(SchemaGenerator, self).__init__(connection, **kwargs)
        self.checkfirst = checkfirst  # skip items that already exist
        self.tables = tables          # optional explicit subset of tables
        self.preparer = dialect.identifier_preparer
        self.dialect = dialect
        self.memo = {}

    def _can_create_table(self, table):
        # Validate identifiers, then (with checkfirst) skip tables that
        # already exist in the target schema.
        self.dialect.validate_identifier(table.name)
        effective_schema = self.connection._get_effective_schema(table)
        if effective_schema:
            self.dialect.validate_identifier(effective_schema)
        return not self.checkfirst or \
            not self.dialect.has_table(self.connection,
                                       table.name, schema=effective_schema)

    def _can_create_sequence(self, sequence):
        # Create a sequence only when the dialect supports sequences,
        # the sequence is required (not "optional" on a dialect where
        # sequences are optional), and it does not already exist when
        # checkfirst is set.
        effective_schema = self.connection._get_effective_schema(sequence)

        return self.dialect.supports_sequences and \
            (
                (not self.dialect.sequences_optional or
                 not sequence.optional) and
                (
                    not self.checkfirst or
                    not self.dialect.has_sequence(
                        self.connection,
                        sequence.name,
                        schema=effective_schema)
                )
            )

    def visit_metadata(self, metadata):
        if self.tables is not None:
            tables = self.tables
        else:
            tables = list(metadata.tables.values())

        # Tables sorted by dependency, paired with any constraints that
        # must be added separately via ALTER.
        collection = sort_tables_and_constraints(
            [t for t in tables if self._can_create_table(t)])

        # Standalone sequences (not owned by a column) are created first.
        seq_coll = [s for s in metadata._sequences.values()
                    if s.column is None and self._can_create_sequence(s)]

        event_collection = [
            t for (t, fks) in collection if t is not None
        ]
        metadata.dispatch.before_create(metadata, self.connection,
                                        tables=event_collection,
                                        checkfirst=self.checkfirst,
                                        _ddl_runner=self)

        for seq in seq_coll:
            self.traverse_single(seq, create_ok=True)

        for table, fkcs in collection:
            if table is not None:
                self.traverse_single(
                    table, create_ok=True,
                    include_foreign_key_constraints=fkcs,
                    _is_metadata_operation=True)
            else:
                # A (None, fkcs) entry carries constraints to be added
                # via ALTER after their tables exist.
                for fkc in fkcs:
                    self.traverse_single(fkc)

        metadata.dispatch.after_create(metadata, self.connection,
                                       tables=event_collection,
                                       checkfirst=self.checkfirst,
                                       _ddl_runner=self)

    def visit_table(
            self, table, create_ok=False,
            include_foreign_key_constraints=None,
            _is_metadata_operation=False):
        if not create_ok and not self._can_create_table(table):
            return

        table.dispatch.before_create(
            table, self.connection,
            checkfirst=self.checkfirst,
            _ddl_runner=self,
            _is_metadata_operation=_is_metadata_operation)

        # Create column-level defaults (e.g. sequences) ahead of the table.
        for column in table.columns:
            if column.default is not None:
                self.traverse_single(column.default)

        if not self.dialect.supports_alter:
            # e.g., don't omit any foreign key constraints
            include_foreign_key_constraints = None

        self.connection.execute(
            CreateTable(
                table,
                include_foreign_key_constraints=include_foreign_key_constraints
            ))

        if hasattr(table, 'indexes'):
            for index in table.indexes:
                self.traverse_single(index)

        table.dispatch.after_create(
            table, self.connection,
            checkfirst=self.checkfirst,
            _ddl_runner=self,
            _is_metadata_operation=_is_metadata_operation)

    def visit_foreign_key_constraint(self, constraint):
        # Constraints are added via ALTER only where the dialect allows it.
        if not self.dialect.supports_alter:
            return
        self.connection.execute(AddConstraint(constraint))

    def visit_sequence(self, sequence, create_ok=False):
        if not create_ok and not self._can_create_sequence(sequence):
            return
        self.connection.execute(CreateSequence(sequence))

    def visit_index(self, index):
        self.connection.execute(CreateIndex(index))
class SchemaDropper(DDLBase):
    """Schema visitor that emits DROP DDL.

    Mirror image of ``SchemaGenerator``: walks a ``MetaData`` collection
    (or individual schema items) and executes the corresponding ``DROP``
    constructs on ``self.connection``, in reverse dependency order.
    """

    def __init__(self, dialect, connection, checkfirst=False,
                 tables=None, **kwargs):
        """
        :param dialect: dialect whose preparer / feature flags drive DDL
        :param connection: connection the DDL statements are executed on
        :param checkfirst: when True, only drop items that actually exist
        :param tables: optional subset of tables to restrict the operation to
        """
        super(SchemaDropper, self).__init__(connection, **kwargs)
        self.checkfirst = checkfirst
        self.tables = tables
        self.preparer = dialect.identifier_preparer
        self.dialect = dialect
        # Memoization dict used by the traversal machinery.
        self.memo = {}

    def visit_metadata(self, metadata):
        """Emit DROP statements for everything in *metadata*."""
        if self.tables is not None:
            tables = self.tables
        else:
            tables = list(metadata.tables.values())

        try:
            unsorted_tables = [t for t in tables if self._can_drop_table(t)]
            # Sort in CREATE order, then reverse for dropping. Named
            # constraints on ALTER-capable backends may be broken out
            # (filter_fn returns None -> eligible); unnamed constraints or
            # non-ALTER backends force inline handling (False).
            collection = list(reversed(
                sort_tables_and_constraints(
                    unsorted_tables,
                    filter_fn=lambda constraint: False
                    if not self.dialect.supports_alter
                    or constraint.name is None
                    else None
                )
            ))
        except exc.CircularDependencyError as err2:
            if not self.dialect.supports_alter:
                # Cycle cannot be broken without ALTER: warn and fall back
                # to an unsorted drop (may fail on the backend).
                util.warn(
                    "Can't sort tables for DROP; an "
                    "unresolvable foreign key "
                    "dependency exists between tables: %s, and backend does "
                    "not support ALTER. To restore at least a partial sort, "
                    "apply use_alter=True to ForeignKey and "
                    "ForeignKeyConstraint "
                    "objects involved in the cycle to mark these as known "
                    "cycles that will be ignored."
                    % (
                        ", ".join(sorted([t.fullname for t in err2.cycles]))
                    )
                )
                collection = [(t, ()) for t in unsorted_tables]
            else:
                # ALTER is available, so the cycle is resolvable if the
                # constraints get names; re-raise with guidance.
                util.raise_from_cause(
                    exc.CircularDependencyError(
                        err2.args[0],
                        err2.cycles, err2.edges,
                        msg="Can't sort tables for DROP; an "
                        "unresolvable foreign key "
                        "dependency exists between tables: %s. Please ensure "
                        "that the ForeignKey and ForeignKeyConstraint objects "
                        "involved in the cycle have "
                        "names so that they can be dropped using "
                        "DROP CONSTRAINT."
                        % (
                            ", ".join(sorted([t.fullname for t in err2.cycles]))
                        )
                    )
                )

        # Standalone sequences (not owned by a column) are dropped last.
        seq_coll = [
            s
            for s in metadata._sequences.values()
            if s.column is None and self._can_drop_sequence(s)
        ]

        event_collection = [
            t for (t, fks) in collection if t is not None
        ]

        metadata.dispatch.before_drop(
            metadata, self.connection, tables=event_collection,
            checkfirst=self.checkfirst, _ddl_runner=self)

        for table, fkcs in collection:
            if table is not None:
                self.traverse_single(
                    table, drop_ok=True, _is_metadata_operation=True)
            else:
                # Broken-out constraints are dropped via ALTER first.
                for fkc in fkcs:
                    self.traverse_single(fkc)

        for seq in seq_coll:
            self.traverse_single(seq, drop_ok=True)

        metadata.dispatch.after_drop(
            metadata, self.connection, tables=event_collection,
            checkfirst=self.checkfirst, _ddl_runner=self)

    def _can_drop_table(self, table):
        """Return True if DROP TABLE should be emitted for *table*."""
        self.dialect.validate_identifier(table.name)
        effective_schema = self.connection._get_effective_schema(table)
        if effective_schema:
            self.dialect.validate_identifier(effective_schema)
        # Under checkfirst, only drop the table when it actually exists.
        return not self.checkfirst or self.dialect.has_table(
            self.connection, table.name, schema=effective_schema)

    def _can_drop_sequence(self, sequence):
        """Return True if DROP SEQUENCE should be emitted for *sequence*."""
        effective_schema = self.connection._get_effective_schema(sequence)
        # Mirror of SchemaGenerator._can_create_sequence, with the
        # existence check inverted for dropping.
        return self.dialect.supports_sequences and \
            ((not self.dialect.sequences_optional or
              not sequence.optional) and
                (not self.checkfirst or
                 self.dialect.has_sequence(
                     self.connection,
                     sequence.name,
                     schema=effective_schema))
             )

    def visit_index(self, index):
        """Emit DROP INDEX for *index*."""
        self.connection.execute(DropIndex(index))

    def visit_table(self, table, drop_ok=False, _is_metadata_operation=False):
        """Emit DROP TABLE (plus column-default teardown) for *table*."""
        if not drop_ok and not self._can_drop_table(table):
            return

        table.dispatch.before_drop(
            table, self.connection,
            checkfirst=self.checkfirst,
            _ddl_runner=self,
            _is_metadata_operation=_is_metadata_operation)

        # Visit column-default objects (e.g. sequences) for teardown.
        for column in table.columns:
            if column.default is not None:
                self.traverse_single(column.default)

        self.connection.execute(DropTable(table))

        table.dispatch.after_drop(
            table, self.connection,
            checkfirst=self.checkfirst,
            _ddl_runner=self,
            _is_metadata_operation=_is_metadata_operation)

    def visit_foreign_key_constraint(self, constraint):
        """Emit ALTER TABLE ... DROP CONSTRAINT, if the backend supports ALTER."""
        if not self.dialect.supports_alter:
            return
        self.connection.execute(DropConstraint(constraint))

    def visit_sequence(self, sequence, drop_ok=False):
        """Emit DROP SEQUENCE for *sequence*."""
        if not drop_ok and not self._can_drop_sequence(sequence):
            return
        self.connection.execute(DropSequence(sequence))
def sort_tables(tables, skip_fn=None, extra_dependencies=None):
    """Dependency-sort a collection of :class:`.Table` objects.

    Emits each :class:`.Table` after the tables it depends on, where a
    dependency is implied by a :class:`.ForeignKeyConstraint` or added
    explicitly via :meth:`.Table.add_is_dependent_on`.

    .. warning::

        This function cannot by itself resolve dependency cycles between
        tables, usually caused by mutually dependent foreign key
        constraints.  Either apply the
        :paramref:`.ForeignKeyConstraint.use_alter` parameter to the
        constraints involved, or use
        :func:`.sql.sort_tables_and_constraints`, which breaks out the
        foreign key constraints participating in cycles separately.

    :param tables: a sequence of :class:`.Table` objects.

    :param skip_fn: optional callable which will be passed a
     :class:`.ForeignKey` object; if it returns True, this
     constraint will not be considered as a dependency.  Note this is
     **different** from the same parameter in
     :func:`.sort_tables_and_constraints`, which is
     instead passed the owning :class:`.ForeignKeyConstraint` object.

    :param extra_dependencies: a sequence of 2-tuples of tables which will
     also be considered as dependent on each other.

    .. seealso::

        :func:`.sort_tables_and_constraints`

        :meth:`.MetaData.sorted_tables` - uses this function to sort

    """
    if skip_fn is None:
        _skip_fn = None
    else:
        def _skip_fn(fkc):
            # The per-ForeignKey skip_fn is adapted to a per-constraint
            # filter: skip the whole constraint (True) when any of its
            # ForeignKey elements is skipped, otherwise leave the
            # decision to the default handling (None).
            return True if any(skip_fn(fk) for fk in fkc.elements) else None

    sorted_pairs = sort_tables_and_constraints(
        tables, filter_fn=_skip_fn, extra_dependencies=extra_dependencies)
    # Discard the constraint lists and the trailing (None, [...]) entry.
    return [table for table, _fkcs in sorted_pairs if table is not None]
def sort_tables_and_constraints(
        tables, filter_fn=None, extra_dependencies=None):
    """sort a collection of :class:`.Table` / :class:`.ForeignKeyConstraint`
    objects.

    This is a dependency-ordered sort which will emit tuples of
    ``(Table, [ForeignKeyConstraint, ...])`` such that each
    :class:`.Table` follows its dependent :class:`.Table` objects.
    Remaining :class:`.ForeignKeyConstraint` objects that are separate due to
    dependency rules not satisfied by the sort are emitted afterwards
    as ``(None, [ForeignKeyConstraint ...])``.

    Tables are dependent on another based on the presence of
    :class:`.ForeignKeyConstraint` objects, explicit dependencies
    added by :meth:`.Table.add_is_dependent_on`, as well as dependencies
    stated here using the :paramref:`~.sort_tables_and_constraints.skip_fn`
    and/or :paramref:`~.sort_tables_and_constraints.extra_dependencies`
    parameters.

    :param tables: a sequence of :class:`.Table` objects.

    :param filter_fn: optional callable which will be passed a
     :class:`.ForeignKeyConstraint` object, and returns a value based on
     whether this constraint should definitely be included or excluded as
     an inline constraint, or neither. If it returns False, the constraint
     will definitely be included as a dependency that cannot be subject
     to ALTER; if True, it will **only** be included as an ALTER result at
     the end. Returning None means the constraint is included in the
     table-based result unless it is detected as part of a dependency cycle.

    :param extra_dependencies: a sequence of 2-tuples of tables which will
     also be considered as dependent on each other.

    .. versionadded:: 1.0.0

    .. seealso::

        :func:`.sort_tables`

    """
    # Dependencies that may never be discarded (explicit ones), vs.
    # FK-derived dependencies that can be dropped to break cycles.
    fixed_dependencies = set()
    mutable_dependencies = set()

    if extra_dependencies is not None:
        fixed_dependencies.update(extra_dependencies)

    # Constraints to be emitted separately at the end (ALTER candidates).
    remaining_fkcs = set()
    for table in tables:
        for fkc in table.foreign_key_constraints:
            if fkc.use_alter is True:
                # Explicitly marked for ALTER: never an inline dependency.
                remaining_fkcs.add(fkc)
                continue

            if filter_fn:
                filtered = filter_fn(fkc)

                if filtered is True:
                    # Caller forces this constraint out to the ALTER group.
                    remaining_fkcs.add(fkc)
                    continue

            # Self-referential FKs impose no inter-table ordering.
            dependent_on = fkc.referred_table
            if dependent_on is not table:
                mutable_dependencies.add((dependent_on, table))

        fixed_dependencies.update(
            (parent, table) for parent in table._extra_dependencies
        )

    try:
        candidate_sort = list(
            topological.sort(
                fixed_dependencies.union(mutable_dependencies), tables,
                deterministic_order=True
            )
        )
    except exc.CircularDependencyError as err:
        # Break the cycle: for each offending FK-derived edge, move the
        # removable constraints of the dependent table into the ALTER
        # group and drop their edges, then retry the sort once.
        for edge in err.edges:
            if edge in mutable_dependencies:
                table = edge[1]
                # filter_fn returning False pins a constraint inline, so
                # only non-False constraints may be broken out.
                can_remove = [
                    fkc for fkc in table.foreign_key_constraints
                    if filter_fn is None or filter_fn(fkc) is not False]
                remaining_fkcs.update(can_remove)
                for fkc in can_remove:
                    dependent_on = fkc.referred_table
                    if dependent_on is not table:
                        mutable_dependencies.discard((dependent_on, table))
        candidate_sort = list(
            topological.sort(
                fixed_dependencies.union(mutable_dependencies), tables,
                deterministic_order=True
            )
        )

    # Pair each table with its inline constraints; the broken-out
    # constraints trail in a final (None, [...]) entry.
    return [
        (table, table.foreign_key_constraints.difference(remaining_fkcs))
        for table in candidate_sort
    ] + [(None, list(remaining_fkcs))]
# --------------------------------------------------------------------------
# (chunk separator: a stray "|" stood here; it is not valid Python)
# --------------------------------------------------------------------------
"""
Classification in Spark
@author: Chris Mantas
@contact: the1pro@gmail.com
@since: Created on 2016-02-12
@todo: custom formats, break up big lines
@license: http://www.apache.org/licenses/LICENSE-2.0 Apache License
"""
from pyspark.mllib.classification import \
LogisticRegressionWithLBFGS, LogisticRegressionModel
from imr_tools import *
from pyspark import SparkContext
from argparse import ArgumentParser
from os import system
# ==================== Global Vars ============================ #

# Whether the trained LR model should fit an intercept term
# (passed as ``intercept=`` to the LBFGS trainer in train_model).
_INTERCEPT = True
# Regularization type passed as ``regType=`` to the LBFGS trainer;
# None disables regularization.
_REGULARIZATION = None
# ==================== Helper Functions ====================== #
def train_model(training_rdd, **kwargs):
    """
    Fit a logistic-regression classifier (LBFGS solver) on the given data.

    Module-level ``_REGULARIZATION`` and ``_INTERCEPT`` supply the fixed
    training configuration; any extra keyword parameters are forwarded
    to the trainer untouched.

    :param training_rdd: the rdd of the training dataset
    :param kwargs: additional key-value params for the training (if any)
    :return: the trained LogisticRegressionModel
    """
    return LogisticRegressionWithLBFGS.train(
        training_rdd, regType=_REGULARIZATION, intercept=_INTERCEPT, **kwargs)
def get_cli_args():
    """
    Build the command-line parser for this script and parse the input.

    :return: an argparse namespace holding the user-supplied parameters
    """
    cli_parser = ArgumentParser(description='Classification on Spark')

    # (flags, options) for every argument, in display order.
    argument_specs = [
        (("operation",),
         dict(help="the operation to run: 'train' or 'classify'")),
        (("input",),
         dict(help="the input dataset (formatted as a tuple)")),
        (("-m", '--model'),
         dict(required=True,
              help="a csv file holding the model weights")),
        (("-o", '--output'),
         dict(help="the output location "
                   "(in case of classify op)")),
        (("-l", '--labels'),
         dict(required=True,
              help="a json file holding all labels in a dataset")),
        (("-c", '--category'),
         dict(type=int, default=1,
              help="which category (label) to use [1-3]")),
        (("-u", "--update"),
         dict(action='store_true', default=False,
              help="update a pre-existing model")),
        (("-e", "--evaluate"),
         dict(action='store_true', default=False,
              help="cross-evaluate the model on 20%% of input")),
    ]
    for flags, options in argument_specs:
        cli_parser.add_argument(*flags, **options)

    return cli_parser.parse_args()
def get_spark_context(appname="spark_job"):
    """
    Return a SparkContext, reusing a module-global ``sc`` if one exists.

    Bug fix: in the original, when ``sc`` was already present in
    ``globals()`` the local name ``sc`` was never assigned, so the final
    ``return sc`` raised UnboundLocalError. We now return the existing
    context explicitly.

    :param appname: application name used when a new context is created
    :return: a SparkContext instance
    """
    # ---------------- Start Spark Job ---------------- #
    print("=============> {} <=============".format(appname))
    if "sc" in globals():
        # Reuse the already-initialized context instead of creating a
        # second one (SparkContext only allows one per JVM).
        return globals()["sc"]
    print("---> Init. Spark Context")
    sc = SparkContext(appName=appname)
    print("---> OK")
    return sc
def calculate_error(valid_rdd, model):
    """
    Calculate the error ratio of a given model on a given RDD dataset.

    Fixes: the original used a Python-2-only tuple-parameter lambda
    (``lambda (l, p): ...``), which is a syntax error on Python 3, and
    raised ZeroDivisionError on an empty dataset.

    :param valid_rdd: an RDD of LabeledPoints
    :param model: a classification model exposing ``predict(features)``
    :return: the fraction of misclassified points, a float in [0-1]
             (0.0 for an empty dataset)
    """
    total = valid_rdd.count()
    if not total:
        # No points to evaluate: report zero error instead of dividing by 0.
        return 0.0
    label_and_preds = valid_rdd.map(
        lambda p: (p.label, model.predict(p.features))
    )
    # Index the (label, prediction) pair instead of tuple-unpacking it in
    # the lambda signature, so this works on both Python 2 and 3.
    erroneous = label_and_preds.filter(lambda lp: lp[0] != lp[1])
    return erroneous.count() / float(total)
# ==================== Spark Jobs ====================== #
def perform_train_job(sc, input_path, l_encoder,
                      initial_model=None, evaluate=False, category=1):
    """
    Train (or update) a Logistic Regression model on the given dataset.

    :param sc: the SparkContext to run the job on
    :param input_path: the input file-name (local or HDFS)
    :param l_encoder: The label encoder
    :param initial_model: the initial LR model we will be enhancing
    :param evaluate: Whether or not to cross-evaluate the model on a 20% split
    :param category: the label category
    :return: the trained LogisticRegressionModel
    """
    # Parse raw text lines into tuples, then into encoded LabeledPoints.
    all_data = sc.textFile(input_path) \
        .map(parse_line) \
        .map(lambda e: tuple_to_labeled_point(e, category, l_encoder))

    # Choose the training data.
    if evaluate:
        # Hold out 20% of the dataset for validation.
        training_data, validation_data = all_data.randomSplit([0.8, 0.2])
    else:
        # Use the whole dataset.
        training_data = all_data

    print("---> Updating Classification Model" if initial_model
          else "---> Training Classification Model")

    # ---------------- Do the training ---------------- #
    initial_weights = initial_model.weights if initial_model else None
    model = train_model(training_data,
                        numClasses=len(l_encoder.classes_),
                        initialWeights=initial_weights)
    print(" > Done")

    # Report the error on the data the model was trained on.
    print("---> Calculating Training Error")
    print(" > " + str(calculate_error(training_data, model)))

    # If requested, also report the error on the held-out split.
    if evaluate:
        print("---> Calculating Evaluation Error")
        print(" > " + str(calculate_error(validation_data, model)))

    return model
def perform_classification_job(sc, input_path, encoder, model,
                               output_path):
    """
    Label every entry of a text dataset with the given model and save the
    result as a text file.

    :param sc: the SparkContext to run the job on
    :param input_path: the input (text) dataset location
    :param encoder: The label encoder for this category
    :param model: The LR model
    :param output_path: The output dataset location
    :return: None
    """
    print("---> Parsing Input")
    # Extract the feature vector from every raw input line.
    feature_entries = sc.textFile(input_path).map(get_features_from_line)
    print("---> OK")

    # ---------------- Do the classification ---------------- #
    print("---> Classifying the Input")
    # The lambda closes over ``model`` and ``encoder`` from this scope.
    labeled_entries = feature_entries.map(
        lambda features: classify_line(features, model, encoder))
    print(" > Done")

    # Save the output as a text file.
    labeled_entries.saveAsTextFile(output_path)
# ======================= MAIN ========================= #
if __name__ == "__main__":
    # get the command-line arguments
    args = get_cli_args()

    # create a label encoder from a local json file that contains the set
    l_encoder = label_encoders_from_json_file(args.labels, args.category)

    # --------------- Choose the Operation to perform --------------- #
    if args.operation.lower() == "train":
        # get/create spark context
        sc = get_spark_context("Train/Update LR Model")

        # load the initial model if it's an update operation
        init_model = None
        if args.update:
            print("---> Loading model")
            # BUG FIX: the loaded model was previously discarded (the call
            # result was never assigned), so --update silently trained from
            # scratch. Assign it so perform_train_job can seed its weights.
            init_model = LogisticRegressionModel.load(sc, args.model)
            print("---> OK")

        # do the train job
        model = perform_train_job(sc, args.input, l_encoder,
                                  initial_model=init_model,
                                  evaluate=args.evaluate,
                                  category=args.category)

        # Best-effort removal of a previously saved model at the target
        # path before saving the new one.
        try:
            system("hdfs dfs -rm -r " + args.model)
        except Exception as e:  # was a bare except with a py2-only print
            print("Failed to delete model: {0} ({1})".format(args.model, e))

        print("---> Saving LR model")
        model.save(sc, args.model)
        print("---> OK")

    elif args.operation.lower() == "classify":
        if not args.output:
            # BUG FIX: the two halves previously concatenated to
            # "...needs to bespecified" (missing space).
            raise Exception("for classify operation, an output needs to be "
                            "specified")
        sc = get_spark_context("Classify (Logistic Regression)")

        # load the model
        print("---> Loading model")
        model = LogisticRegressionModel.load(sc, args.model)
        print("---> OK")

        # do the classification job
        perform_classification_job(sc, args.input, l_encoder,
                                   model, args.output)
    else:
        print("I do not know operation: " + args.operation)
# --------------------------------------------------------------------------
# (Non-code residue neutralized: a stray "|" separator followed by
# dataset-viewer boilerplate — "Subsets and Splits / No community queries
# yet / The top public SQL queries from the community will appear here once
# available." — which is not part of this program.)
# --------------------------------------------------------------------------